Mirror of https://github.com/vortexgpgpu/vortex.git (synced 2025-04-23 21:39:10 -04:00)

Merge Vortex 2.2

parent 2271d2b286, commit 02091f3d44
15 changed files with 512 additions and 462 deletions
@ -33,6 +33,9 @@
`endif

///////////////////////////////////////////////////////////////////////////////

`ifndef VM_DISABLE
`define VM_ENABLE
`endif

`ifndef EXT_M_DISABLE
`define EXT_M_ENABLE
@ -172,12 +175,11 @@
`define IO_BASE_ADDR 64'h000000040
`endif

`ifdef VM_ENABLE
`ifndef PAGE_TABLE_BASE_ADDR
`define PAGE_TABLE_BASE_ADDR 64'h1F0000000
`endif

`ifndef PAGE_TABLE_SIZE
`define PAGE_TABLE_SIZE 4096
`endif

`else // XLEN_32
@ -198,12 +200,11 @@
`define IO_BASE_ADDR 32'h00000040
`endif

`ifdef VM_ENABLE
`ifndef PAGE_TABLE_BASE_ADDR
`define PAGE_TABLE_BASE_ADDR 32'hF0000000
`endif

`ifndef PAGE_TABLE_SIZE
`define PAGE_TABLE_SIZE 4096
`endif

`endif
@ -271,40 +272,58 @@
`endif

// Virtual Memory Configuration ///////////////////////////////////////////////////////
`ifndef VM_DISABLE
`define VM_ENABLE
`endif
`ifdef VM_ENABLE
`ifndef VM_ADDR_MODE
`define VM_ADDR_MODE SV32
`endif

`ifndef PTE_SIZE
`ifdef XLEN_32
`define PTE_SIZE 4
`define NUM_PTE_ENTRY 1024
`else
`ifdef XLEN_64
`define PTE_SIZE 8
`define NUM_PTE_ENTRY 1024
`else
`define PTE_SIZE 8
`define NUM_PTE_ENTRY 1024
`endif
`ifdef XLEN_32
`ifndef VM_ADDR_MODE
`define VM_ADDR_MODE SV32 //or BARE
`endif
`ifndef PTE_SIZE
`define PTE_SIZE (4)
`endif
`ifndef SATP_MODE_IDX
`define SATP_MODE_IDX (31)
`endif
`ifndef SATP_PPN_WIDTH
`define SATP_PPN_WIDTH (22)
`endif
`else
`ifndef VM_ADDR_MODE
`define VM_ADDR_MODE SV64 //or BARE
`endif
`ifndef PTE_SIZE
`define PTE_SIZE (8)
`endif
`ifndef SATP_MODE_IDX
`define SATP_MODE_IDX (63)
`endif
`ifndef SATP_PPN_WIDTH
`define SATP_PPN_WIDTH (44)
`endif
`define PT_SIZE (PTE_SIZE * NUM_PTE_ENTRY)
`endif

`ifndef NUM_PTE_ENTRY
`define NUM_PTE_ENTRY (1024)
`endif

`ifndef PT_SIZE
`define PT_SIZE (PTE_SIZE * NUM_PTE_ENTRY)
`endif

`ifndef PT_TOTAL_SIZE
`define PT_TOTAL_SIZE (PT_SIZE*(1+NUM_PTE_ENTRY))
`endif


`ifndef TLB_SIZE
`define TLB_SIZE 32
`endif

`ifndef SUPER_PAGING
`define SUPER_PAGING 0
`define TLB_SIZE (32)
`endif

`endif

`ifndef MEM_PAGE_SIZE
`define MEM_PAGE_SIZE (4096)
`endif

// Pipeline Configuration /////////////////////////////////////////////////////

// Issue width
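To make the sizing arithmetic in the VM configuration block above concrete, here is a minimal standalone C++ sketch. The constant values follow the XLEN_32/SV32 branch of the defines; nothing here is taken from the runtime itself.

#include <cstdint>
#include <cstdio>

// Mirrors the XLEN_32 branch of the defines above.
constexpr uint64_t PTE_SIZE      = 4;                             // bytes per page-table entry
constexpr uint64_t NUM_PTE_ENTRY = 1024;                          // entries per table (10 index bits)
constexpr uint64_t PT_SIZE       = PTE_SIZE * NUM_PTE_ENTRY;      // 4096 B: one table occupies one page
constexpr uint64_t PT_TOTAL_SIZE = PT_SIZE * (1 + NUM_PTE_ENTRY); // root table + 1024 leaf tables (~4 MB)
constexpr uint64_t MEM_PAGE_SIZE = 4096;

int main() {
  // A two-level SV32 table can map NUM_PTE_ENTRY^2 pages of MEM_PAGE_SIZE bytes each.
  uint64_t mappable = NUM_PTE_ENTRY * NUM_PTE_ENTRY * MEM_PAGE_SIZE;
  printf("PT_SIZE=%lu B, PT_TOTAL_SIZE=%lu B, mappable=%lu GB\n",
         PT_SIZE, PT_TOTAL_SIZE, mappable >> 30);
  return 0;
}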
@ -28,11 +28,11 @@
#include <chrono>

#ifdef VM_ENABLE
#include <vortex.h>
#include <utils.h>
#include <VX_config.h>
// #include <vortex.h>
//#include <utils.h>
#include <malloc.h>

#include <VX_config.h>
#include <VX_types.h>

#include <util.h>
@ -50,7 +50,6 @@
using namespace vortex;

#ifdef VM_ENABLE

#ifndef NDEBUG
#define DBGPRINT(format, ...) do { printf("[VXDRV] " format "", ##__VA_ARGS__); } while (0)
#else
@ -85,13 +84,9 @@ class vx_device {
public:
vx_device()
: arch_(NUM_THREADS, NUM_WARPS, NUM_CORES)
#ifdef VM_ENABLE
, ram_(0, RAM_PAGE_SIZE<<11)
#else
, ram_(0, RAM_PAGE_SIZE)
#endif
, ram_(0, MEM_PAGE_SIZE)
, processor_(arch_)
, global_mem_(ALLOC_BASE_ADDR, GLOBAL_MEM_SIZE - ALLOC_BASE_ADDR, RAM_PAGE_SIZE, CACHE_BLOCK_SIZE)
, global_mem_(ALLOC_BASE_ADDR, GLOBAL_MEM_SIZE - ALLOC_BASE_ADDR, MEM_PAGE_SIZE, CACHE_BLOCK_SIZE)
{
// attach memory module
processor_.attach_ram(&ram_);
@ -150,133 +145,141 @@ public:
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef VM_ENABLE
|
||||
// virtual to physical mapping
|
||||
uint64_t map_p2v(uint64_t pAddr)
|
||||
{
|
||||
return pAddr + 0xf000000;
|
||||
}
|
||||
bool need_trans(uint64_t dev_pAddr)
|
||||
{
|
||||
// Check if this is BARE mode
|
||||
bool isBAREMode = (get_mode() == VA_MODE::BARE);
|
||||
// Check if the address is reserved
|
||||
bool isReserved = (dev_pAddr >= PAGE_TABLE_BASE_ADDR);
|
||||
// Check if the address falls within the startup address range
|
||||
bool isStartAddress = (STARTUP_ADDR <= dev_pAddr) && (dev_pAddr <= (STARTUP_ADDR + 0x40000));
|
||||
|
||||
// Print the boolean results for debugging purposes
|
||||
// printf("%p, %u, %u\n", (void *)dev_pAddr, isReserved, isStartAddress);
|
||||
|
||||
// Return true if the address needs translation (i.e., it's not reserved and not a start address)
|
||||
return (!isBAREMode && !isReserved && !isStartAddress);
|
||||
}
|
||||
|
||||
uint64_t phy_to_virt_map(uint64_t size, uint64_t* dev_pAddr, uint32_t flags)
|
||||
{
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
DBGPRINT("(size = 0x%lx, dev_pAddr= 0x%lx, flags = 0x%x)\n", size, *dev_pAddr, flags);
|
||||
DBGPRINT("bit mode: %d\n", XLEN);
|
||||
|
||||
// if (*dev_pAddr == STARTUP_ADDR || *dev_pAddr == 0x7FFFF000) {
|
||||
|
||||
if (!need_trans(*dev_pAddr))
|
||||
{
|
||||
DBGPRINT("Translation is not needed.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t init_pAddr = *dev_pAddr;
|
||||
uint64_t init_vAddr = map_p2v(init_pAddr);
|
||||
uint64_t ppn = 0, vpn = 0 ;
|
||||
|
||||
//dev_pAddr can be of size greater than a page, but we have to map and update
|
||||
//page tables on a page table granularity. So divide the allocation into pages.
|
||||
bool is_start = false;
|
||||
for (ppn = (*dev_pAddr) >> 12; ppn < ((*dev_pAddr) >> 12) + (size/RAM_PAGE_SIZE) + 1; ppn++)
|
||||
{
|
||||
vpn = map_p2v(ppn << 12) >> 12;
|
||||
if (is_start == false) {
|
||||
DBGPRINT("**Search vpn in page table:0x%lx\n", vpn);
|
||||
is_start = true;
|
||||
}
|
||||
else {
|
||||
DBGPRINT("Next vpn: 0x%lx\n",vpn);
|
||||
}
|
||||
|
||||
//Currently a 1-1 mapping is used, this can be changed here to support different
|
||||
//mapping schemes
|
||||
|
||||
// If the ppn-to-vpn mapping doesn't exist.
|
||||
if (addr_mapping.find(vpn) == addr_mapping.end())
|
||||
{
|
||||
//Create mapping.
|
||||
update_page_table(ppn, vpn, flags);
|
||||
addr_mapping[vpn] = ppn;
|
||||
}
|
||||
}
|
||||
DBGPRINT("Mapped virtual addr: 0x%lx to physical addr: %lx\n", init_vAddr, init_pAddr);
|
||||
|
||||
// Sanity check
|
||||
uint64_t pAddr = page_table_walk(init_vAddr);
|
||||
if (pAddr != init_pAddr)
|
||||
{
|
||||
assert(pAddr == init_pAddr && "ERROR: translated virtual address does not match the physical address");
|
||||
}
|
||||
|
||||
*dev_pAddr = init_vAddr; // commit vpn to be returned to host
|
||||
DBGPRINT("Translated device virtual addr: 0x%lx\n", *dev_pAddr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
int mem_alloc(uint64_t size, int flags, uint64_t* dev_addr) {
|
||||
|
||||
uint64_t addr;
|
||||
DBGPRINT("mem_alloc size: 0x%lx\n",size);
|
||||
CHECK_ERR(global_mem_.allocate(size, &addr), {
|
||||
return err;
|
||||
});
|
||||
CHECK_ERR(this->mem_access(addr, size, flags), {
|
||||
global_mem_.release(addr);
|
||||
return err;
|
||||
});
|
||||
*dev_addr = addr;
|
||||
#ifdef VM_ENABLE
|
||||
// VM address translation
|
||||
phy_to_virt_map(size, dev_addr,flags);
|
||||
// virtual to physical mapping
|
||||
uint64_t map_p2v(uint64_t pAddr)
|
||||
{
|
||||
return pAddr + 0xf000000;
|
||||
}
|
||||
bool need_trans(uint64_t dev_pAddr)
|
||||
{
|
||||
// Check if this is BARE mode
|
||||
bool isBAREMode = (get_mode() == VA_MODE::BARE);
|
||||
// Check if the address is reserved for system usage
|
||||
bool isReserved = (dev_pAddr >= PAGE_TABLE_BASE_ADDR);
|
||||
// Check if the address is reserved for IO usage
|
||||
bool isIO = (dev_pAddr < USER_BASE_ADDR);
|
||||
// Check if the address falls within the startup address range
|
||||
bool isStartAddress = (STARTUP_ADDR <= dev_pAddr) && (dev_pAddr <= (STARTUP_ADDR + 0x40000));
|
||||
|
||||
// Print the boolean results for debugging purposes
|
||||
// printf("%p, %u, %u\n", (void *)dev_pAddr, isReserved, isStartAddress);
|
||||
|
||||
// Return true if the address needs translation (i.e., it's not reserved and not a start address)
|
||||
return (!isBAREMode && !isReserved && !isIO && !isStartAddress);
|
||||
}
|
||||
|
||||
uint64_t phy_to_virt_map(uint64_t size, uint64_t *dev_pAddr, uint32_t flags)
|
||||
{
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
DBGPRINT(" [RT:PTV_MAP] size = 0x%lx, dev_pAddr= 0x%lx, flags = 0x%x\n", size, *dev_pAddr, flags);
|
||||
DBGPRINT(" [RT:PTV_MAP] bit mode: %d\n", XLEN);
|
||||
|
||||
// if (*dev_pAddr == STARTUP_ADDR || *dev_pAddr == 0x7FFFF000) {
|
||||
|
||||
if (!need_trans(*dev_pAddr))
|
||||
{
|
||||
DBGPRINT(" [RT:PTV_MAP] Translation is not needed.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t init_pAddr = *dev_pAddr;
|
||||
uint64_t init_vAddr = map_p2v(init_pAddr);
|
||||
uint64_t ppn = 0, vpn = 0;
|
||||
|
||||
// dev_pAddr can be of size greater than a page, but we have to map and update
|
||||
// page tables on a page table granularity. So divide the allocation into pages.
|
||||
bool is_start = false;
|
||||
for (ppn = (*dev_pAddr) >> 12; ppn < ((*dev_pAddr) >> 12) + (size / MEM_PAGE_SIZE) + 1; ppn++)
|
||||
{
|
||||
vpn = map_p2v(ppn << 12) >> 12;
|
||||
if (is_start == false)
|
||||
{
|
||||
DBGPRINT(" [RT:PTV_MAP] Search vpn in page table:0x%lx\n", vpn);
|
||||
is_start = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
DBGPRINT(" [RT:PTV_MAP] Next vpn: 0x%lx\n", vpn);
|
||||
}
|
||||
|
||||
// Currently a 1-1 mapping is used, this can be changed here to support different
|
||||
// mapping schemes
|
||||
|
||||
// If the ppn-to-vpn mapping doesn't exist.
|
||||
if (addr_mapping.find(vpn) == addr_mapping.end())
|
||||
{
|
||||
// Create mapping.
|
||||
update_page_table(ppn, vpn, flags);
|
||||
addr_mapping[vpn] = ppn;
|
||||
}
|
||||
}
|
||||
DBGPRINT(" [RT:PTV_MAP] Mapped virtual addr: 0x%lx to physical addr: %lx\n", init_vAddr, init_pAddr);
|
||||
|
||||
// Sanity check
|
||||
uint64_t pAddr = page_table_walk(init_vAddr);
|
||||
if (pAddr != init_pAddr)
|
||||
{
|
||||
assert(pAddr == init_pAddr && "ERROR: translated virtual address does not match the physical address");
|
||||
}
|
||||
|
||||
*dev_pAddr = init_vAddr; // commit vpn to be returned to host
|
||||
DBGPRINT(" [RT:PTV_MAP] Translated device virtual addr: 0x%lx\n", *dev_pAddr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
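A minimal standalone sketch of the 1:1 offset mapping that map_p2v and the page-granular loop in phy_to_virt_map above implement. The 0xf000000 offset and the 4 KB page size come from the code above; the allocation address and size are made-up example values, and the local map stands in for addr_mapping.

#include <cstdint>
#include <cstdio>
#include <unordered_map>

constexpr uint64_t kPageSize  = 4096;      // MEM_PAGE_SIZE
constexpr uint64_t kV2POffset = 0xf000000; // fixed offset used by map_p2v above

uint64_t map_p2v(uint64_t pAddr) { return pAddr + kV2POffset; }

int main() {
  std::unordered_map<uint64_t, uint64_t> addr_mapping; // vpn -> ppn
  uint64_t dev_pAddr = 0x10000000;                     // example allocation start (physical)
  uint64_t size      = 3 * kPageSize + 100;            // spans four pages

  // Walk the allocation one page at a time and record one mapping per touched page.
  for (uint64_t ppn = dev_pAddr >> 12; ppn < (dev_pAddr >> 12) + (size / kPageSize) + 1; ppn++) {
    uint64_t vpn = map_p2v(ppn << 12) >> 12;
    if (addr_mapping.find(vpn) == addr_mapping.end())
      addr_mapping[vpn] = ppn;
  }
  printf("mapped %zu pages, device virtual base = 0x%lx\n",
         addr_mapping.size(), map_p2v(dev_pAddr));
  return 0;
}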
|
||||
|
||||
int mem_reserve(uint64_t dev_addr, uint64_t size, int flags) {
|
||||
CHECK_ERR(global_mem_.reserve(dev_addr, size), {
|
||||
return err;
|
||||
});
|
||||
DBGPRINT("mem_reserve: addr: 0x%lx, size: 0x%lx\n",dev_addr, size);
|
||||
CHECK_ERR(this->mem_access(dev_addr, size, flags), {
|
||||
global_mem_.release(dev_addr);
|
||||
return err;
|
||||
});
|
||||
int mem_alloc(uint64_t size, int flags, uint64_t *dev_addr)
|
||||
{
|
||||
|
||||
uint64_t addr;
|
||||
DBGPRINT(" [RT:mem_alloc] mem_alloc size: 0x%lx\n", size);
|
||||
CHECK_ERR(global_mem_.allocate(size, &addr), {
|
||||
return err;
|
||||
});
|
||||
CHECK_ERR(this->mem_access(addr, size, flags), {
|
||||
global_mem_.release(addr);
|
||||
return err;
|
||||
});
|
||||
*dev_addr = addr;
|
||||
#ifdef VM_ENABLE
|
||||
uint64_t paddr = dev_addr;
|
||||
phy_to_virt_map(size, &paddr, flags);
|
||||
// VM address translation
|
||||
phy_to_virt_map(size, dev_addr, flags);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mem_free(uint64_t dev_addr) {
|
||||
int mem_reserve(uint64_t dev_addr, uint64_t size, int flags)
|
||||
{
|
||||
CHECK_ERR(global_mem_.reserve(dev_addr, size), {
|
||||
return err;
|
||||
});
|
||||
DBGPRINT(" [RT:mem_reserve] mem_reserve: addr: 0x%lx, size: 0x%lx\n", dev_addr, size);
|
||||
CHECK_ERR(this->mem_access(dev_addr, size, flags), {
|
||||
global_mem_.release(dev_addr);
|
||||
return err;
|
||||
});
|
||||
#ifdef VM_ENABLE
|
||||
uint64_t pAddr = page_table_walk(dev_addr);
|
||||
// VM address translation
|
||||
return global_mem_.release(pAddr);
|
||||
uint64_t paddr = dev_addr;
|
||||
phy_to_virt_map(size, &paddr, flags);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mem_free(uint64_t dev_addr)
|
||||
{
|
||||
#ifdef VM_ENABLE
|
||||
uint64_t pAddr = page_table_walk(dev_addr);
|
||||
// VM address translation
|
||||
return global_mem_.release(pAddr);
|
||||
#else
|
||||
return global_mem_.release(dev_addr);
|
||||
return global_mem_.release(dev_addr);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
int mem_access(uint64_t dev_addr, uint64_t size, int flags) {
|
||||
int mem_access(uint64_t dev_addr, uint64_t size, int flags)
|
||||
{
|
||||
uint64_t asize = aligned_size(size, CACHE_BLOCK_SIZE);
|
||||
if (dev_addr + asize > GLOBAL_MEM_SIZE)
|
||||
return -1;
|
||||
|
@ -285,7 +288,8 @@ public:
|
|||
return 0;
|
||||
}
|
||||
|
||||
int mem_info(uint64_t* mem_free, uint64_t* mem_used) const {
|
||||
int mem_info(uint64_t *mem_free, uint64_t *mem_used) const
|
||||
{
|
||||
if (mem_free)
|
||||
*mem_free = global_mem_.free();
|
||||
if (mem_used)
|
||||
|
@ -293,21 +297,23 @@ public:
|
|||
return 0;
|
||||
}
|
||||
|
||||
int upload(uint64_t dest_addr, const void* src, uint64_t size) {
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
uint64_t asize = aligned_size(size, CACHE_BLOCK_SIZE);
|
||||
if (dest_addr + asize > GLOBAL_MEM_SIZE)
|
||||
return -1;
|
||||
int upload(uint64_t dest_addr, const void *src, uint64_t size)
|
||||
{
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
uint64_t asize = aligned_size(size, CACHE_BLOCK_SIZE);
|
||||
if (dest_addr + asize > GLOBAL_MEM_SIZE)
|
||||
return -1;
|
||||
#ifdef VM_ENABLE
|
||||
uint64_t pAddr = page_table_walk(dest_addr);
|
||||
DBGPRINT("Upload data to vAddr = 0x%lx (pAddr=0x%lx)\n", dest_addr, pAddr);
|
||||
|
||||
uint64_t pAddr = page_table_walk(dest_addr);
|
||||
DBGPRINT(" [RT:upload] Upload data to vAddr = 0x%lx (pAddr=0x%lx)\n", dest_addr, pAddr);
|
||||
dest_addr = pAddr; // Overwrite with the translated physical address
|
||||
#endif
|
||||
|
||||
ram_.enable_acl(false);
|
||||
ram_.write((const uint8_t*)src, dest_addr, size);
|
||||
ram_.write((const uint8_t *)src, dest_addr, size);
|
||||
ram_.enable_acl(true);
|
||||
|
||||
|
||||
/*DBGPRINT("upload %ld bytes to 0x%lx\n", size, dest_addr);
|
||||
for (uint64_t i = 0; i < size && i < 1024; i += 4) {
|
||||
DBGPRINT(" 0x%lx <- 0x%x\n", dest_addr + i, *(uint32_t*)((uint8_t*)src + i));
|
||||
|
@ -316,17 +322,19 @@ public:
|
|||
return 0;
|
||||
}
|
||||
|
||||
int download(void* dest, uint64_t src_addr, uint64_t size) {
|
||||
int download(void *dest, uint64_t src_addr, uint64_t size)
|
||||
{
|
||||
uint64_t asize = aligned_size(size, CACHE_BLOCK_SIZE);
|
||||
if (src_addr + asize > GLOBAL_MEM_SIZE)
|
||||
return -1;
|
||||
#ifdef VM_ENABLE
|
||||
uint64_t pAddr = page_table_walk(src_addr);
|
||||
DBGPRINT("Download data to vAddr = 0x%lx (pAddr=0x%lx)\n", src_addr, pAddr);
|
||||
uint64_t pAddr = page_table_walk(src_addr);
|
||||
DBGPRINT(" [RT:download] Download data to vAddr = 0x%lx (pAddr=0x%lx)\n", src_addr, pAddr);
|
||||
src_addr = pAddr; // Overwrite with the translated physical address
|
||||
#endif
|
||||
|
||||
ram_.enable_acl(false);
|
||||
ram_.read((uint8_t*)dest, src_addr, size);
|
||||
ram_.read((uint8_t *)dest, src_addr, size);
|
||||
ram_.enable_acl(true);
|
||||
|
||||
/*DBGPRINT("download %ld bytes from 0x%lx\n", size, src_addr);
|
||||
|
@ -337,9 +345,11 @@ public:
|
|||
return 0;
|
||||
}
|
||||
|
||||
int start(uint64_t krnl_addr, uint64_t args_addr) {
|
||||
int start(uint64_t krnl_addr, uint64_t args_addr)
|
||||
{
|
||||
// ensure prior run completed
|
||||
if (future_.valid()) {
|
||||
if (future_.valid())
|
||||
{
|
||||
future_.wait();
|
||||
}
|
||||
|
||||
|
@ -350,9 +360,8 @@ public:
|
|||
this->dcr_write(VX_DCR_BASE_STARTUP_ARG1, args_addr >> 32);
|
||||
|
||||
// start new run
|
||||
future_ = std::async(std::launch::async, [&]{
|
||||
processor_.run();
|
||||
});
|
||||
future_ = std::async(std::launch::async, [&]
|
||||
{ processor_.run(); });
|
||||
|
||||
// clear mpm cache
|
||||
mpm_cache_.clear();
|
||||
|
@ -360,12 +369,14 @@ public:
|
|||
return 0;
|
||||
}
|
||||
|
||||
int ready_wait(uint64_t timeout) {
|
||||
int ready_wait(uint64_t timeout)
|
||||
{
|
||||
if (!future_.valid())
|
||||
return 0;
|
||||
uint64_t timeout_sec = timeout / 1000;
|
||||
std::chrono::seconds wait_time(1);
|
||||
for (;;) {
|
||||
for (;;)
|
||||
{
|
||||
// wait for 1 sec and check status
|
||||
auto status = future_.wait_for(wait_time);
|
||||
if (status == std::future_status::ready)
|
||||
|
@ -376,8 +387,10 @@ public:
|
|||
return 0;
|
||||
}
|
||||
|
||||
int dcr_write(uint32_t addr, uint32_t value) {
|
||||
if (future_.valid()) {
|
||||
int dcr_write(uint32_t addr, uint32_t value)
|
||||
{
|
||||
if (future_.valid())
|
||||
{
|
||||
future_.wait(); // ensure prior run completed
|
||||
}
|
||||
processor_.dcr_write(addr, value);
|
||||
|
@ -385,15 +398,18 @@ public:
|
|||
return 0;
|
||||
}
|
||||
|
||||
int dcr_read(uint32_t addr, uint32_t* value) const {
|
||||
int dcr_read(uint32_t addr, uint32_t *value) const
|
||||
{
|
||||
return dcrs_.read(addr, value);
|
||||
}
|
||||
|
||||
int mpm_query(uint32_t addr, uint32_t core_id, uint64_t* value) {
|
||||
int mpm_query(uint32_t addr, uint32_t core_id, uint64_t *value)
|
||||
{
|
||||
uint32_t offset = addr - VX_CSR_MPM_BASE;
|
||||
if (offset > 31)
|
||||
return -1;
|
||||
if (mpm_cache_.count(core_id) == 0) {
|
||||
if (mpm_cache_.count(core_id) == 0)
|
||||
{
|
||||
uint64_t mpm_mem_addr = IO_MPM_ADDR + core_id * 32 * sizeof(uint64_t);
|
||||
CHECK_ERR(this->download(mpm_cache_[core_id].data(), mpm_mem_addr, 32 * sizeof(uint64_t)), {
|
||||
return err;
|
||||
|
@ -404,247 +420,250 @@ public:
|
|||
}
|
||||
|
||||
#ifdef VM_ENABLE
|
||||
/* VM Management */
|
||||
void set_processor_satp(VA_MODE mode)
|
||||
/* VM Management */
|
||||
void set_processor_satp(VA_MODE mode)
|
||||
{
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
uint64_t satp = 0;
|
||||
if (mode == VA_MODE::BARE)
|
||||
{
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
uint32_t satp;
|
||||
if (mode == VA_MODE::BARE)
|
||||
{
|
||||
DBGPRINT("VA_MODE = BARE MODE");
|
||||
satp = 0;
|
||||
}
|
||||
else if (mode == VA_MODE::SV32)
|
||||
{
|
||||
satp = (alloc_2nd_level_page_table() >> 12) | 0x80000000;
|
||||
DBGPRINT("VA_MODE = SV32 MODE(satp = 0x%x)\n",satp);
|
||||
}
|
||||
processor_.set_satp(satp);
|
||||
DBGPRINT(" [RT:set_satp] VA_MODE = BARE MODE");
|
||||
}
|
||||
else
|
||||
{
|
||||
satp = (alloc_2nd_level_page_table() / MEM_PAGE_SIZE) | (1 << SATP_MODE_IDX);
|
||||
DBGPRINT(" [RT:set_satp] VA_MODE = SV mode (satp = 0x%lx)\n", satp);
|
||||
}
|
||||
processor_.set_satp(satp);
|
||||
}
|
||||
|
||||
uint64_t get_ptbr()
|
||||
{
|
||||
// return processor_.get_satp();
|
||||
return processor_.get_satp() & ((1 << SATP_PPN_WIDTH) - 1);
|
||||
}
|
||||
uint64_t get_pte_address(uint64_t base_page, uint64_t vpn)
|
||||
{
|
||||
return (base_page * MEM_PAGE_SIZE) + (vpn * PTE_SIZE);
|
||||
}
|
||||
|
||||
VA_MODE get_mode()
|
||||
{
|
||||
#ifdef XLEN_32
|
||||
return processor_.get_satp() & (1 << SATP_MODE_IDX) ? VA_MODE::SV32 : VA_MODE::BARE;
|
||||
#else // 64 bit
|
||||
return processor_.get_satp() & (1 << SATP_MODE_IDX) ? VA_MODE::SV64 : VA_MODE::BARE;
|
||||
#endif
|
||||
}
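For reference, a minimal sketch of the satp packing that set_processor_satp, get_ptbr and get_mode above implement. The field positions follow the SV32 (XLEN_32) defaults SATP_MODE_IDX=31 and SATP_PPN_WIDTH=22 from the config; the root-table base is just an example value.

#include <cstdint>
#include <cstdio>

constexpr int      SATP_MODE_IDX  = 31;   // MODE bit (SV32)
constexpr int      SATP_PPN_WIDTH = 22;   // PPN field width (SV32)
constexpr uint64_t MEM_PAGE_SIZE  = 4096;

int main() {
  uint64_t pt_base = 0xF0000000;                            // example root page-table base
  uint64_t satp = (pt_base / MEM_PAGE_SIZE)                 // PPN of the root table
                | (1ull << SATP_MODE_IDX);                  // non-zero MODE: translation enabled

  bool     vm_on = (satp >> SATP_MODE_IDX) & 1;             // the check behind get_mode()
  uint64_t ptbr  = satp & ((1ull << SATP_PPN_WIDTH) - 1);   // the mask behind get_ptbr()
  printf("vm_on=%d, root table at 0x%lx\n", vm_on, ptbr * MEM_PAGE_SIZE);
  return 0;
}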
|
||||
|
||||
void update_page_table(uint64_t ppn, uint64_t vpn, uint32_t flag)
|
||||
{
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
DBGPRINT(" [RT:Update PT] Mapping vpn 0x%05lx to ppn 0x%05lx(flags = %u)\n", vpn, ppn, flag);
|
||||
assert((((ppn >> 20) == 0) && ((vpn >> 20) == 0)) && "Upper 12 bits are not zero!");
|
||||
// Updating page table with the following mapping of (vAddr) to (pAddr).
|
||||
// uint32_t page_bit_shift = log2ceil(PTE_SIZE*NUM_PTE_ENTRY);
|
||||
uint64_t ppn_1 = 0, pte_addr = 0, pte_bytes = 0;
|
||||
uint64_t vpn_1 = bits(vpn, 10, 19);
|
||||
uint64_t vpn_0 = bits(vpn, 0, 9);
|
||||
|
||||
// Read first level PTE.
|
||||
DBGPRINT(" [RT:Update PT]Start second-level page table\n");
|
||||
pte_addr = get_pte_address(get_ptbr(), vpn_1);
|
||||
pte_bytes = read_pte(pte_addr);
|
||||
DBGPRINT(" [RT:Update PT] PTE addr 0x%lx, PTE bytes 0x%lx\n", pte_addr, pte_bytes);
|
||||
ppn_1 = (pte_bytes >> 10);
|
||||
|
||||
if (bit(pte_bytes, 0) && ((pte_bytes & 0xFFFFFFFF) != 0xbaadf00d))
|
||||
{
|
||||
// If the valid bit is set, proceed to the next level using the new ppn from the PTE.
|
||||
DBGPRINT(" [RT:Update PT] PTE valid (ppn 0x%lx), continuing the walk...\n", ppn_1);
|
||||
}
|
||||
else
|
||||
{
|
||||
// If valid bit not set, allocate a second level page table
|
||||
// in device memory and store ppn in PTE. Set rwx = 000 in PTE
|
||||
// to indicate this is a pointer to the next level of the page table.
|
||||
DBGPRINT(" [RT:Update PT] PTE Invalid (ppn 0x%lx), continuing the walk...\n", ppn_1);
|
||||
ppn_1 = (alloc_1st_level_page_table(vpn_1) >> 12);
|
||||
pte_bytes = ((ppn_1 << 10) | 0b0000000001);
|
||||
assert((pte_addr >> 32) == 0 && "Upper 32 bits are not zero!");
|
||||
write_pte(pte_addr, pte_bytes);
|
||||
// if (pte_bytes != read_pte(pte_addr))
|
||||
// DBGPRINT("Read/write values are different!\n");
|
||||
}
|
||||
|
||||
uint32_t get_ptbr()
|
||||
{
|
||||
// return processor_.get_satp();
|
||||
return processor_.get_satp() & 0x003fffff;
|
||||
DBGPRINT(" [RT:Update PT] Move to first-level page table\n");
|
||||
// Read second level PTE.
|
||||
pte_addr = get_pte_address(ppn_1, vpn_0);
|
||||
pte_bytes = read_pte(pte_addr);
|
||||
|
||||
if (bit(pte_bytes, 0) && ((pte_bytes & 0xFFFFFFFF) != 0xbaadf00d))
|
||||
{
|
||||
DBGPRINT(" [RT:Update PT] ERROR, shouldn't be here\n");
|
||||
exit(1);
|
||||
// If valid bit is set, then the page is already allocated.
|
||||
// Should not reach this point, a sanity check.
|
||||
}
|
||||
uint64_t get_pte_address(uint64_t base_page, uint64_t vpn)
|
||||
else
|
||||
{
|
||||
return (base_page << 12) + (vpn * PTE_SIZE);
|
||||
}
|
||||
// If valid bit not set, write ppn of pAddr in PTE. Set rwx = 111 in PTE
|
||||
// to indicate this is a leaf PTE and has the stated permissions.
|
||||
pte_bytes = ((ppn << 10) | 0b0000001111);
|
||||
write_pte(pte_addr, pte_bytes);
|
||||
if (pte_bytes != read_pte(pte_addr))
|
||||
DBGPRINT(" [RT:Update PT] PTE write value and read value are not matched!\n");
|
||||
}
|
||||
}
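To illustrate the two-level update above, here is a small self-contained sketch of how a 20-bit VPN splits into the two table indices and how the pointer and leaf PTE words are encoded. The vpn and ppn values are arbitrary examples; the shift/mask expressions are equivalent to the bits(vpn, 10, 19) / bits(vpn, 0, 9) calls used in the code.

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t vpn   = 0xF1234;                  // example 20-bit virtual page number
  uint64_t vpn_1 = (vpn >> 10) & 0x3FF;      // index into the root (second-level) table
  uint64_t vpn_0 = vpn & 0x3FF;              // index into the leaf (first-level) table

  uint64_t ppn         = 0x12345;                    // example physical page number
  uint64_t pointer_pte = (ppn << 10) | 0b0000000001; // V=1, RWX=000: points to next level
  uint64_t leaf_pte    = (ppn << 10) | 0b0000001111; // V=1, RWX=111: leaf mapping
  printf("vpn_1=0x%lx vpn_0=0x%lx pointer=0x%lx leaf=0x%lx\n",
         vpn_1, vpn_0, pointer_pte, leaf_pte);
  return 0;
}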
|
||||
|
||||
VA_MODE get_mode()
|
||||
uint64_t page_table_walk(uint64_t vAddr_bits)
|
||||
{
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
DBGPRINT(" [RT:PTW] start vAddr: 0x%lx\n", vAddr_bits);
|
||||
if (!need_trans(vAddr_bits))
|
||||
{
|
||||
return processor_.get_satp() & 0x80000000 ? VA_MODE::SV32 : VA_MODE::BARE;
|
||||
}
|
||||
DBGPRINT(" [RT:PTW] Translation is not needed.\n");
|
||||
return vAddr_bits;
|
||||
}
|
||||
uint64_t LEVELS = 2;
|
||||
vAddr_SV32_t vAddr(vAddr_bits);
|
||||
uint64_t pte_addr, pte_bytes;
|
||||
uint64_t pt_ba = get_ptbr() << 12;
|
||||
|
||||
void update_page_table(uint64_t ppn, uint64_t vpn, uint32_t flag) {
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
DBGPRINT("Mapping vpn 0x%05lx to ppn 0x%05lx(flags = %u)\n", vpn, ppn,flag);
|
||||
assert((((ppn>> 20) == 0) && ((vpn >> 20) == 0)) && "Upper 12 bits are not zero!");
|
||||
//Updating page table with the following mapping of (vAddr) to (pAddr).
|
||||
// uint32_t page_bit_shift = log2ceil(PTE_SIZE*NUM_PTE_ENTRY);
|
||||
uint64_t ppn_1 = 0, pte_addr = 0, pte_bytes = 0;
|
||||
uint64_t vpn_1 = bits(vpn, 10, 19);
|
||||
uint64_t vpn_0 = bits(vpn, 0, 9);
|
||||
// Get base page table.
|
||||
|
||||
//Read first level PTE.
|
||||
DBGPRINT("Start second-level page table\n");
|
||||
pte_addr = get_pte_address(get_ptbr(), vpn_1);
|
||||
pte_bytes = read_pte(pte_addr);
|
||||
DBGPRINT("[PTE] addr 0x%lx, PTE 0x%lx\n", pte_addr, pte_bytes);
|
||||
ppn_1 = (pte_bytes >> 10);
|
||||
for (int i = LEVELS - 1; i >= 0; i--)
|
||||
{
|
||||
// Read PTE.
|
||||
pte_addr = pt_ba + vAddr.vpn[i] * PTE_SIZE;
|
||||
pte_bytes = read_pte(pte_addr);
|
||||
PTE_SV32_t pte(pte_bytes);
|
||||
DBGPRINT(" [RT:PTW] Level[%u] pte_bytes = 0x%lx, pte flags = %u)\n", i, pte.ppn, pte.flags);
|
||||
|
||||
if ( bit(pte_bytes, 0) && ((pte_bytes & 0xFFFFFFFF) != 0xbaadf00d))
|
||||
// Check if it has invalid flag bits.
|
||||
if ((pte.v == 0) | ((pte.r == 0) & (pte.w == 1)))
|
||||
{
|
||||
std::string msg = " [RT:PTW] Page Fault : Attempted to access invalid entry. Entry: 0x";
|
||||
throw Page_Fault_Exception(msg);
|
||||
}
|
||||
|
||||
if ((pte.r == 0) & (pte.w == 0) & (pte.x == 0))
|
||||
{
|
||||
// Not a leaf node as rwx == 000
|
||||
if (i == 0)
|
||||
{
|
||||
// If the valid bit is set, proceed to the next level using the new ppn from the PTE.
|
||||
DBGPRINT("PTE valid (ppn 0x%lx), continuing the walk...\n",ppn_1);
|
||||
throw Page_Fault_Exception(" [RT:PTW] Page Fault : No leaf node found.");
|
||||
}
|
||||
else
|
||||
{
|
||||
//If valid bit not set, allocate a second level page table
|
||||
// in device memory and store ppn in PTE. Set rwx = 000 in PTE
|
||||
//to indicate this is a pointer to the next level of the page table.
|
||||
DBGPRINT("PTE Invalid (ppn 0x%lx), continuing the walk...\n",ppn_1);
|
||||
ppn_1 = (alloc_1st_level_page_table(vpn_1) >> 12);
|
||||
pte_bytes = ((ppn_1 << 10) | 0b0000000001) ;
|
||||
assert((pte_addr>> 32) == 0 && "Upper 32 bits are not zero!");
|
||||
write_pte(pte_addr, pte_bytes);
|
||||
// if (pte_bytes != read_pte(pte_addr))
|
||||
// DBGPRINT("Read/write values are different!\n");
|
||||
}
|
||||
|
||||
|
||||
DBGPRINT("Move to first-level page table\n");
|
||||
//Read second level PTE.
|
||||
pte_addr = get_pte_address(ppn_1, vpn_0);
|
||||
pte_bytes = read_pte(pte_addr);
|
||||
|
||||
if ( bit(pte_bytes, 0) && ((pte_bytes & 0xFFFFFFFF) != 0xbaadf00d))
|
||||
{
|
||||
DBGPRINT("ERROR, shouldn't be here\n");
|
||||
exit(1);
|
||||
//If valid bit is set, then the page is already allocated.
|
||||
//Should not reach this point, a sanity check.
|
||||
}
|
||||
else
|
||||
{
|
||||
//If valid bit not set, write ppn of pAddr in PTE. Set rwx = 111 in PTE
|
||||
//to indicate this is a leaf PTE and has the stated permissions.
|
||||
pte_bytes = ( (ppn << 10) | 0b0000001111) ;
|
||||
write_pte(pte_addr, pte_bytes);
|
||||
if (pte_bytes != read_pte(pte_addr))
|
||||
DBGPRINT("Read/write values are different!\n");
|
||||
// Continue on to next level.
|
||||
pt_ba = pte.ppn << 12;
|
||||
DBGPRINT(" [RT:PTW] next pt_ba: %p\n", (void *)pt_ba);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// Leaf node found, finished walking.
|
||||
pt_ba = pte.ppn << 12;
|
||||
DBGPRINT(" [RT:PTW] Found PT_Base_Address [%d] = %lx\n", i, pt_ba);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t page_table_walk(uint64_t vAddr_bits)
|
||||
{
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
DBGPRINT("PTW on vAddr: 0x%lx\n", vAddr_bits);
|
||||
if (!need_trans(vAddr_bits))
|
||||
{
|
||||
DBGPRINT("Translation is not needed.\n");
|
||||
return vAddr_bits;
|
||||
}
|
||||
uint64_t LEVELS = 2;
|
||||
vAddr_SV32_t vAddr(vAddr_bits);
|
||||
uint64_t pte_addr, pte_bytes;
|
||||
uint64_t pt_ba = get_ptbr() << 12;
|
||||
|
||||
//Get base page table.
|
||||
|
||||
for ( int i = LEVELS-1 ; i >= 0 ; i--)
|
||||
{
|
||||
//Read PTE.
|
||||
pte_addr = pt_ba+vAddr.vpn[i]*PTE_SIZE;
|
||||
pte_bytes = read_pte(pte_addr);
|
||||
PTE_SV32_t pte(pte_bytes);
|
||||
DBGPRINT("pte_bytes = 0x%lx, pte flags = %u)\n", pte.ppn , pte.flags);
|
||||
|
||||
//Check if it has invalid flag bits.
|
||||
if ( (pte.v == 0) | ( (pte.r == 0) & (pte.w == 1) ) )
|
||||
{
|
||||
std::string msg= "Page Fault : Attempted to access invalid entry. Entry: 0x";
|
||||
throw Page_Fault_Exception(msg);
|
||||
}
|
||||
|
||||
if ( (pte.r == 0) & (pte.w == 0) & (pte.x == 0))
|
||||
{
|
||||
//Not a leaf node as rwx == 000
|
||||
if (i == 0)
|
||||
{
|
||||
throw Page_Fault_Exception("Page Fault : No leaf node found.");
|
||||
}
|
||||
else
|
||||
{
|
||||
//Continue on to next level.
|
||||
pt_ba = pte.ppn << 12;
|
||||
DBGPRINT("next pt_ba: %p\n", (void *)pt_ba);
|
||||
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
//Leaf node found, finished walking.
|
||||
pt_ba = pte.ppn << 12;
|
||||
DBGPRINT("Found PT_Base_Address [%d] = %lx\n", i, pt_ba);
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// pte_bytes is final leaf
|
||||
PTE_SV32_t pte(pte_bytes);
|
||||
//Check RWX permissions according to access type.
|
||||
if (pte.r == 0)
|
||||
{
|
||||
throw Page_Fault_Exception("Page Fault : TYPE LOAD, Incorrect permissions.");
|
||||
}
|
||||
|
||||
uint64_t paddr = pt_ba + vAddr.pgoff;
|
||||
return paddr;
|
||||
// pte_bytes is final leaf
|
||||
PTE_SV32_t pte(pte_bytes);
|
||||
// Check RWX permissions according to access type.
|
||||
if (pte.r == 0)
|
||||
{
|
||||
throw Page_Fault_Exception(" [RT:PTW] Page Fault : TYPE LOAD, Incorrect permissions.");
|
||||
}
|
||||
|
||||
uint64_t alloc_2nd_level_page_table() {
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
uint64_t addr=PAGE_TABLE_BASE_ADDR;
|
||||
uint64_t size=1<<23; // 8MB !!!FIXME!!!
|
||||
CHECK_ERR(this->mem_reserve(addr, size, VX_MEM_READ_WRITE), {
|
||||
return err;
|
||||
});
|
||||
init_page_table(addr);
|
||||
return addr;
|
||||
uint64_t paddr = pt_ba + vAddr.pgoff;
|
||||
return paddr;
|
||||
}
|
||||
|
||||
uint64_t alloc_2nd_level_page_table()
|
||||
{
|
||||
uint64_t addr = PAGE_TABLE_BASE_ADDR;
|
||||
uint64_t size = PT_TOTAL_SIZE;
|
||||
CHECK_ERR(this->mem_reserve(addr, size, VX_MEM_READ_WRITE), {
|
||||
return err;
|
||||
});
|
||||
init_page_table(addr);
|
||||
return addr;
|
||||
}
|
||||
uint64_t alloc_1st_level_page_table(uint64_t vpn_1)
|
||||
{
|
||||
uint64_t addr = PAGE_TABLE_BASE_ADDR + PT_SIZE * (1 + vpn_1);
|
||||
init_page_table(addr);
|
||||
return addr;
|
||||
}
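The two allocation helpers above imply a simple linear layout: the root table sits at PAGE_TABLE_BASE_ADDR and the leaf table for root index vpn_1 sits PT_SIZE*(1+vpn_1) bytes after it. A small sketch of that arithmetic, using the 32-bit default values from the config:

#include <cstdint>
#include <cstdio>

constexpr uint64_t PAGE_TABLE_BASE_ADDR = 0xF0000000; // 32-bit default from VX_config
constexpr uint64_t PT_SIZE              = 4 * 1024;   // PTE_SIZE * NUM_PTE_ENTRY (SV32)

int main() {
  const uint64_t samples[] = {0, 1, 1023};
  for (uint64_t vpn_1 : samples)
    printf("leaf table for vpn_1=%4lu at 0x%lx\n",
           vpn_1, PAGE_TABLE_BASE_ADDR + PT_SIZE * (1 + vpn_1));
  return 0;
}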
|
||||
|
||||
// Zero-initialize the target page-table area (4 KB for 32-bit, 8 KB for 64-bit).
|
||||
void init_page_table(uint64_t addr)
|
||||
{
|
||||
uint64_t asize = aligned_size(PT_SIZE, CACHE_BLOCK_SIZE);
|
||||
DBGPRINT(" [RT:init_page_table] (addr=0x%lx, size=0x%lx)\n", addr, asize);
|
||||
uint8_t *src = new uint8_t[asize];
|
||||
for (uint64_t i = 0; i < PT_SIZE; ++i)
|
||||
{
|
||||
src[i] = 0;
|
||||
}
|
||||
uint64_t alloc_1st_level_page_table(uint64_t vpn_1) {
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
uint64_t addr = PAGE_TABLE_BASE_ADDR + PTE_SIZE * NUM_PTE_ENTRY*(1+vpn_1);
|
||||
init_page_table(addr);
|
||||
return addr;
|
||||
ram_.enable_acl(false);
|
||||
ram_.write((const uint8_t *)src, addr, asize);
|
||||
ram_.enable_acl(true);
|
||||
}
|
||||
|
||||
// void read_page_table(uint64_t addr) {
|
||||
// uint8_t *dest = new uint8_t[MEM_PAGE_SIZE];
|
||||
// download(dest, addr, MEM_PAGE_SIZE);
|
||||
// DBGPRINT("VXDRV: download %d bytes from 0x%x\n", MEM_PAGE_SIZE, addr);
|
||||
// for (int i = 0; i < MEM_PAGE_SIZE; i += 4) {
|
||||
// DBGPRINT("mem-read: 0x%x -> 0x%x\n", addr + i, *(uint64_t*)((uint8_t*)dest + i));
|
||||
// }
|
||||
// }
|
||||
|
||||
void write_pte(uint64_t addr, uint64_t value = 0xbaadf00d)
|
||||
{
|
||||
DBGPRINT(" [RT:Write_pte] writing pte 0x%lx to pAddr: 0x%lx\n", value, addr);
|
||||
uint8_t *src = new uint8_t[PTE_SIZE];
|
||||
for (uint64_t i = 0; i < PTE_SIZE; ++i)
|
||||
{
|
||||
src[i] = (value >> (i << 3)) & 0xff;
|
||||
}
|
||||
// std::cout << "writing PTE to RAM addr 0x" << std::hex << addr << std::endl;
|
||||
ram_.enable_acl(false);
|
||||
ram_.write((const uint8_t *)src, addr, PTE_SIZE);
|
||||
ram_.enable_acl(true);
|
||||
}
|
||||
|
||||
void init_page_table(uint64_t addr) {
|
||||
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
|
||||
DBGPRINT("init_page_table (addr=0x%lx)\n", addr);
|
||||
uint64_t asize = aligned_size(RAM_PAGE_SIZE, CACHE_BLOCK_SIZE);
|
||||
// uint64_t asize = aligned_size(size, CACHE_BLOCK_SIZE);
|
||||
uint8_t *src = new uint8_t[RAM_PAGE_SIZE];
|
||||
for (uint64_t i = 0; i < RAM_PAGE_SIZE; ++i) {
|
||||
src[i] = 0;
|
||||
}
|
||||
ram_.enable_acl(false);
|
||||
ram_.write((const uint8_t*)src, addr, asize);
|
||||
ram_.enable_acl(true);
|
||||
}
|
||||
uint64_t read_pte(uint64_t addr)
|
||||
{
|
||||
uint8_t *dest = new uint8_t[PTE_SIZE];
|
||||
#ifdef XLEN_32
|
||||
uint64_t mask = 0x00000000FFFFFFFF;
|
||||
#else // 64bit
|
||||
uint64_t mask = 0xFFFFFFFFFFFFFFFF;
|
||||
#endif
|
||||
|
||||
// void read_page_table(uint64_t addr) {
|
||||
// uint8_t *dest = new uint8_t[RAM_PAGE_SIZE];
|
||||
// download(dest, addr, RAM_PAGE_SIZE);
|
||||
// DBGPRINT("VXDRV: download %d bytes from 0x%x\n", RAM_PAGE_SIZE, addr);
|
||||
// for (int i = 0; i < RAM_PAGE_SIZE; i += 4) {
|
||||
// DBGPRINT("mem-read: 0x%x -> 0x%x\n", addr + i, *(uint64_t*)((uint8_t*)dest + i));
|
||||
// }
|
||||
// }
|
||||
ram_.read((uint8_t *)dest, addr, PTE_SIZE);
|
||||
uint64_t ret = (*(uint64_t *)((uint8_t *)dest)) & mask;
|
||||
DBGPRINT(" [RT:read_pte] reading PTE 0x%lx from RAM addr 0x%lx\n", ret, addr);
|
||||
|
||||
void write_pte(uint64_t addr, uint64_t value = 0xbaadf00d) {
|
||||
DBGPRINT("[Write_pte] writing pte 0x%lx to pAddr: 0x%lx\n", value, addr);
|
||||
uint8_t *src = new uint8_t[PTE_SIZE];
|
||||
for (uint64_t i = 0; i < PTE_SIZE; ++i) {
|
||||
src[i] = (value >> (i << 3)) & 0xff;
|
||||
}
|
||||
//std::cout << "writing PTE to RAM addr 0x" << std::hex << addr << std::endl;
|
||||
ram_.enable_acl(false);
|
||||
ram_.write((const uint8_t*)src, addr, PTE_SIZE);
|
||||
ram_.enable_acl(true);
|
||||
}
|
||||
|
||||
uint64_t read_pte(uint64_t addr) {
|
||||
uint8_t *dest = new uint8_t[PTE_SIZE];
|
||||
uint64_t mask = 0;
|
||||
if (XLEN == 32)
|
||||
mask = 0x00000000FFFFFFFF;
|
||||
else if (XLEN == 64)
|
||||
mask = 0xFFFFFFFFFFFFFFFF;
|
||||
else
|
||||
assert(0 && "XLEN is not either 32 or 64");
|
||||
|
||||
ram_.read((uint8_t*)dest, addr, PTE_SIZE);
|
||||
uint64_t ret = (*(uint64_t*)((uint8_t*)dest)) & mask;
|
||||
DBGPRINT("[read_pte] reading PTE 0x%lx from RAM addr 0x%lx\n", ret, addr);
|
||||
|
||||
return ret;
|
||||
}
|
||||
return ret;
|
||||
}
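A minimal round-trip sketch of the byte-wise PTE serialization used by write_pte and read_pte above. It assumes a little-endian host (as the original pointer cast does) and uses an arbitrary example PTE value.

#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr size_t PTE_SIZE = 4; // SV32 entries; 8 for SV64

int main() {
  uint64_t value = 0x00048C0Full; // example leaf PTE: ppn=0x123, flags V|R|W|X
  uint8_t  bytes[PTE_SIZE];
  for (size_t i = 0; i < PTE_SIZE; ++i)       // serialize least-significant byte first
    bytes[i] = (value >> (i << 3)) & 0xff;

  uint64_t readback = 0;                      // reassemble and mask to XLEN, as read_pte does
  std::memcpy(&readback, bytes, PTE_SIZE);    // little-endian assumption
  readback &= 0x00000000FFFFFFFFull;
  printf("wrote 0x%lx, read back 0x%lx\n", value, readback);
  return 0;
}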
|
||||
#endif // JAEWON
|
||||
|
||||
private:
|
||||
Arch arch_;
|
||||
RAM ram_;
|
||||
Processor processor_;
|
||||
MemoryAllocator global_mem_;
|
||||
DeviceConfig dcrs_;
|
||||
std::future<void> future_;
|
||||
Arch arch_;
|
||||
RAM ram_;
|
||||
Processor processor_;
|
||||
MemoryAllocator global_mem_;
|
||||
DeviceConfig dcrs_;
|
||||
std::future<void> future_;
|
||||
std::unordered_map<uint32_t, std::array<uint64_t, 32>> mpm_cache_;
|
||||
#ifdef VM_ENABLE
|
||||
std::unordered_map<uint64_t, uint64_t> addr_mapping;
|
||||
std::unordered_map<uint64_t, uint64_t> addr_mapping;
|
||||
#endif
|
||||
};
|
||||
|
||||
|
|
|
@ -21,6 +21,13 @@
|
|||
#include <bitset>
|
||||
|
||||
using namespace vortex;
|
||||
#ifdef VM_ENABLE
|
||||
#ifndef NDEBUG
|
||||
#define DBGPRINT(format, ...) do { printf("[VXDRV] " format "", ##__VA_ARGS__); } while (0)
|
||||
#else
|
||||
#define DBGPRINT(format, ...) ((void)0)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
uint64_t bits(uint64_t addr, uint8_t s_idx, uint8_t e_idx)
|
||||
{
|
||||
|
@ -115,7 +122,6 @@ void MemoryUnit::ADecoder::map(uint64_t start, uint64_t end, MemDevice &md) {
|
|||
}
|
||||
|
||||
void MemoryUnit::ADecoder::read(void* data, uint64_t addr, uint64_t size) {
|
||||
// printf("====%s (addr= 0x%lx, size= 0x%lx) ====\n", __PRETTY_FUNCTION__,addr,size);
|
||||
mem_accessor_t ma;
|
||||
if (!this->lookup(addr, size, &ma)) {
|
||||
std::cout << "lookup of 0x" << std::hex << addr << " failed.\n";
|
||||
|
@ -125,7 +131,6 @@ void MemoryUnit::ADecoder::read(void* data, uint64_t addr, uint64_t size) {
|
|||
}
|
||||
|
||||
void MemoryUnit::ADecoder::write(const void* data, uint64_t addr, uint64_t size) {
|
||||
// printf("====%s====\n", __PRETTY_FUNCTION__);
|
||||
mem_accessor_t ma;
|
||||
if (!this->lookup(addr, size, &ma)) {
|
||||
std::cout << "lookup of 0x" << std::hex << addr << " failed.\n";
|
||||
|
@ -138,7 +143,9 @@ void MemoryUnit::ADecoder::write(const void* data, uint64_t addr, uint64_t size)
|
|||
|
||||
MemoryUnit::MemoryUnit(uint64_t pageSize)
|
||||
: pageSize_(pageSize)
|
||||
#ifndef VM_ENABLE
|
||||
, enableVM_(pageSize != 0)
|
||||
#endif
|
||||
, amo_reservation_({0x0, false})
|
||||
#ifdef VM_ENABLE
|
||||
, TLB_HIT(0)
|
||||
|
@ -158,9 +165,9 @@ void MemoryUnit::attach(MemDevice &m, uint64_t start, uint64_t end) {
|
|||
decoder_.map(start, end, m);
|
||||
}
|
||||
|
||||
|
||||
#ifdef VM_ENABLE
|
||||
std::pair<bool, uint64_t> MemoryUnit::tlbLookup(uint64_t vAddr, ACCESS_TYPE type, uint64_t* size_bits) {
|
||||
// printf("====%s====\n", __PRETTY_FUNCTION__);
|
||||
|
||||
//Find entry while accounting for different sizes.
|
||||
for (auto entry : tlb_)
|
||||
|
@ -201,7 +208,7 @@ std::pair<bool, uint64_t> MemoryUnit::tlbLookup(uint64_t vAddr, ACCESS_TYPE type
|
|||
|
||||
}
|
||||
//Check access permissions.
|
||||
if ( (type == ACCESS_TYPE::FETCH) & ((e.r == 0) | (e.x == 0)) )
|
||||
if ( (type == ACCESS_TYPE::FENCE) & ((e.r == 0) | (e.x == 0)) )
|
||||
{
|
||||
throw Page_Fault_Exception("Page Fault : Incorrect permissions.");
|
||||
}
|
||||
|
@ -251,7 +258,7 @@ uint64_t MemoryUnit::toPhyAddr(uint64_t addr, uint32_t flagMask) {
|
|||
|
||||
#ifdef VM_ENABLE
|
||||
void MemoryUnit::read(void* data, uint64_t addr, uint32_t size, ACCESS_TYPE type) {
|
||||
// printf("====%s====\n", __PRETTY_FUNCTION__);
|
||||
DBGPRINT(" [MMU:read] 0x%lx, 0x%x, %u\n",addr,size,type);
|
||||
uint64_t pAddr;
|
||||
pAddr = vAddr_to_pAddr(addr, type);
|
||||
return decoder_.read(data, pAddr, size);
|
||||
|
@ -264,7 +271,7 @@ void MemoryUnit::read(void* data, uint64_t addr, uint32_t size, bool sup) {
|
|||
#endif
|
||||
#ifdef VM_ENABLE
|
||||
void MemoryUnit::write(const void* data, uint64_t addr, uint32_t size, ACCESS_TYPE type) {
|
||||
// printf("====%s====\n", __PRETTY_FUNCTION__);
|
||||
DBGPRINT(" [MMU:Write] 0x%lx, 0x%x, %u\n",addr,size,type);
|
||||
uint64_t pAddr;
|
||||
pAddr = vAddr_to_pAddr(addr, type);
|
||||
decoder_.write(data, pAddr, size);
|
||||
|
@ -280,6 +287,7 @@ void MemoryUnit::write(const void* data, uint64_t addr, uint32_t size, bool sup)
|
|||
|
||||
#ifdef VM_ENABLE
|
||||
void MemoryUnit::amo_reserve(uint64_t addr) {
|
||||
DBGPRINT(" [MMU:amo_reserve] 0x%lx\n",addr);
|
||||
uint64_t pAddr = this->vAddr_to_pAddr(addr,ACCESS_TYPE::LOAD);
|
||||
amo_reservation_.addr = pAddr;
|
||||
amo_reservation_.valid = true;
|
||||
|
@ -294,6 +302,7 @@ void MemoryUnit::amo_reserve(uint64_t addr) {
|
|||
|
||||
#ifdef VM_ENABLE
|
||||
bool MemoryUnit::amo_check(uint64_t addr) {
|
||||
DBGPRINT(" [MMU:amo_check] 0x%lx\n",addr);
|
||||
uint64_t pAddr = this->vAddr_to_pAddr(addr, ACCESS_TYPE::LOAD);
|
||||
return amo_reservation_.valid && (amo_reservation_.addr == pAddr);
|
||||
}
|
||||
|
@ -593,30 +602,30 @@ void RAM::loadHexImage(const char* filename) {
|
|||
#ifdef VM_ENABLE
|
||||
|
||||
bool MemoryUnit::need_trans(uint64_t dev_pAddr)
|
||||
{
|
||||
// Check if this is BARE mode
|
||||
bool isBAREMode = (this->mode == VA_MODE::BARE);
|
||||
// Check if the address is reserved
|
||||
bool isReserved = (dev_pAddr >= PAGE_TABLE_BASE_ADDR);
|
||||
// Check if the address falls within the startup address range
|
||||
bool isStartAddress = (STARTUP_ADDR <= dev_pAddr) && (dev_pAddr < (STARTUP_ADDR + 0x40000));
|
||||
|
||||
// Print the boolean results for debugging purposes
|
||||
// printf("0x%lx, %u, %u, %u \n", dev_pAddr,isBAREMode, isReserved, isStartAddress);
|
||||
|
||||
// Return true if the address needs translation (i.e., it's not reserved and not a start address)
|
||||
return (!isBAREMode && !isReserved && !isStartAddress);
|
||||
}
|
||||
{
|
||||
// Check if this is BARE mode
|
||||
bool isBAREMode = (this->mode == VA_MODE::BARE);
|
||||
// Check if the address is reserved for system usage
|
||||
bool isReserved = (dev_pAddr >= PAGE_TABLE_BASE_ADDR);
|
||||
// Check if the address is reserved for IO usage
|
||||
bool isIO= (dev_pAddr < USER_BASE_ADDR);
|
||||
// Check if the address falls within the startup address range
|
||||
bool isStartAddress = (STARTUP_ADDR <= dev_pAddr) && (dev_pAddr <= (STARTUP_ADDR + 0x40000));
|
||||
|
||||
// Print the boolean results for debugging purposes
|
||||
// printf("%p, %u, %u\n", (void *)dev_pAddr, isReserved, isStartAddress);
|
||||
|
||||
// Return true if the address needs translation (i.e., it's not reserved and not a start address)
|
||||
return (!isBAREMode && !isReserved && !isIO && !isStartAddress);
|
||||
}
|
||||
uint64_t MemoryUnit::vAddr_to_pAddr(uint64_t vAddr, ACCESS_TYPE type)
|
||||
{
|
||||
uint64_t pfn;
|
||||
uint64_t size_bits;
|
||||
// printf("====%s====\n", __PRETTY_FUNCTION__);
|
||||
// printf("vaddr = 0x%lx, type = 0x%u\n",vAddr,type);
|
||||
DBGPRINT(" [MMU: V2P] vaddr = 0x%lx, type = 0x%u\n",vAddr,type);
|
||||
if (!need_trans(vAddr))
|
||||
{
|
||||
// printf("Translation is not needed.\n");
|
||||
DBGPRINT(" [MMU: V2P] Translation is not needed.\n");
|
||||
return vAddr;
|
||||
}
|
||||
|
||||
|
@ -640,18 +649,18 @@ uint64_t MemoryUnit::vAddr_to_pAddr(uint64_t vAddr, ACCESS_TYPE type)
|
|||
}
|
||||
|
||||
//Construct final address using pfn and offset.
|
||||
// std::cout << "[MemoryUnit] translated vAddr: 0x" << std::hex << vAddr << " to pAddr: 0x" << std::hex << ((pfn << size_bits) + (vAddr & ((1 << size_bits) - 1))) << std::endl;
|
||||
DBGPRINT(" [MMU: V2P] translated vAddr: 0x%lx to pAddr 0x%lx",vAddr,((pfn << size_bits) + (vAddr & ((1 << size_bits) - 1))));
|
||||
return (pfn << size_bits) + (vAddr & ((1 << size_bits) - 1));
|
||||
}
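The final address construction above is just page frame plus page offset; a tiny sketch with example numbers (4 KB pages, arbitrary pfn and vAddr):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t vAddr     = 0x80001234; // example virtual address
  uint64_t pfn       = 0x12345;    // page frame number from the TLB or page walk
  uint64_t size_bits = 12;         // 4 KB pages
  uint64_t pAddr = (pfn << size_bits) + (vAddr & ((1ull << size_bits) - 1));
  printf("vAddr 0x%lx -> pAddr 0x%lx\n", vAddr, pAddr);
  return 0;
}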
|
||||
|
||||
std::pair<uint64_t, uint8_t> MemoryUnit::page_table_walk(uint64_t vAddr_bits, ACCESS_TYPE type, uint64_t* size_bits)
|
||||
{
|
||||
// printf("====%s====\n", __PRETTY_FUNCTION__);
|
||||
// printf("vaddr = 0x%lx, type = %u, size_bits %lu\n", vAddr_bits, type, *size_bits);
|
||||
DBGPRINT(" [MMU:PTW] Start: vaddr = 0x%lx, type = %u, size_bits %lu\n", vAddr_bits, type, *size_bits);
|
||||
uint64_t LEVELS = 2;
|
||||
vAddr_SV32_t vAddr(vAddr_bits);
|
||||
uint64_t pte_bytes = 0;
|
||||
|
||||
uint64_t pte_addr =0;
|
||||
//Get base page table.
|
||||
uint64_t pt_ba = this->ptbr << 12;
|
||||
int i = LEVELS - 1;
|
||||
|
@ -660,14 +669,15 @@ std::pair<uint64_t, uint8_t> MemoryUnit::page_table_walk(uint64_t vAddr_bits, AC
|
|||
{
|
||||
|
||||
//Read PTE.
|
||||
decoder_.read(&pte_bytes, pt_ba+vAddr.vpn[i]*PTE_SIZE, PTE_SIZE);
|
||||
pte_addr = pt_ba+vAddr.vpn[i] * PTE_SIZE;
|
||||
decoder_.read(&pte_bytes, pte_addr, PTE_SIZE);
|
||||
PTE_SV32_t pte(pte_bytes);
|
||||
DBGPRINT(" [MMU:PTW] Level[%u] pte_bytes = 0x%lx, pte flags = %u)\n", i, pte.ppn , pte.flags);
|
||||
|
||||
//Check if it has invalid flag bits.
|
||||
if ( (pte.v == 0) | ( (pte.r == 0) & (pte.w == 1) ) )
|
||||
{
|
||||
printf("Error: PTE FLAGS=0x%x\n",pte.flags);
|
||||
throw Page_Fault_Exception("Page Fault : Attempted to access invalid entry.");
|
||||
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : Attempted to access invalid entry.");
|
||||
}
|
||||
|
||||
if ( (pte.r == 0) & (pte.w == 0) & (pte.x == 0))
|
||||
|
@ -676,8 +686,7 @@ std::pair<uint64_t, uint8_t> MemoryUnit::page_table_walk(uint64_t vAddr_bits, AC
|
|||
i--;
|
||||
if (i < 0)
|
||||
{
|
||||
printf("Error: PTE FLAGS=0x%x\n",pte.flags);
|
||||
throw Page_Fault_Exception("Page Fault : No leaf node found.");
|
||||
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : No leaf node found.");
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -696,35 +705,35 @@ std::pair<uint64_t, uint8_t> MemoryUnit::page_table_walk(uint64_t vAddr_bits, AC
|
|||
PTE_SV32_t pte(pte_bytes);
|
||||
|
||||
//Check RWX permissions according to access type.
|
||||
if ( (type == ACCESS_TYPE::FETCH) & ((pte.r == 0) | (pte.x == 0)) )
|
||||
if ( (type == ACCESS_TYPE::FENCE) & ((pte.r == 0) | (pte.x == 0)) )
|
||||
{
|
||||
printf("Error: PTE FLAGS=0x%x\n",pte.flags);
|
||||
throw Page_Fault_Exception("Page Fault : TYPE FETCH, Incorrect permissions.");
|
||||
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : TYPE FENCE, Incorrect permissions.");
|
||||
}
|
||||
else if ( (type == ACCESS_TYPE::LOAD) & (pte.r == 0) )
|
||||
{
|
||||
printf("Error: PTE FLAGS=0x%x\n",pte.flags);
|
||||
throw Page_Fault_Exception("Page Fault : TYPE LOAD, Incorrect permissions.");
|
||||
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : TYPE LOAD, Incorrect permissions.");
|
||||
}
|
||||
else if ( (type == ACCESS_TYPE::STORE) & (pte.w == 0) )
|
||||
{
|
||||
printf("Error: PTE FLAGS=0x%x\n",pte.flags);
|
||||
throw Page_Fault_Exception("Page Fault : TYPE STORE, Incorrect permissions.");
|
||||
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : TYPE STORE, Incorrect permissions.");
|
||||
}
|
||||
*size_bits = 12;
|
||||
uint64_t pfn = pt_ba >> *size_bits;
|
||||
return std::make_pair(pfn, pte_bytes & 0xff);
|
||||
}
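A condensed sketch of the permission checks performed at the end of the walk above. ACCESS_TYPE follows the enum in this codebase (where FENCE is used as the instruction-fetch access type); Flags and check_permissions are illustrative names only, not part of the runtime.

#include <stdexcept>

enum class ACCESS_TYPE { LOAD, STORE, FENCE }; // FENCE doubles as the fetch access type here

struct Flags { bool r, w, x; }; // simplified view of the leaf PTE permission bits

// Throw a page fault if the access type is not allowed by the leaf permissions.
void check_permissions(ACCESS_TYPE type, const Flags &f) {
  if (type == ACCESS_TYPE::FENCE && (!f.r || !f.x))
    throw std::runtime_error("Page Fault : TYPE FENCE, Incorrect permissions.");
  if (type == ACCESS_TYPE::LOAD && !f.r)
    throw std::runtime_error("Page Fault : TYPE LOAD, Incorrect permissions.");
  if (type == ACCESS_TYPE::STORE && !f.w)
    throw std::runtime_error("Page Fault : TYPE STORE, Incorrect permissions.");
}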
|
||||
|
||||
|
||||
uint32_t MemoryUnit::get_satp()
|
||||
uint64_t MemoryUnit::get_satp()
|
||||
{
|
||||
return satp;
|
||||
}
|
||||
void MemoryUnit::set_satp(uint32_t satp)
|
||||
void MemoryUnit::set_satp(uint64_t satp)
|
||||
{
|
||||
this->satp = satp;
|
||||
this->ptbr = satp & 0x003fffff; //22 bits
|
||||
this->mode = satp & 0x80000000 ? VA_MODE::SV32 : VA_MODE::BARE;
|
||||
this->ptbr = satp & ( (1<< SATP_PPN_WIDTH) - 1);
|
||||
#ifdef XLEN_32
|
||||
this->mode = satp & (1<< SATP_MODE_IDX) ? VA_MODE::SV32 : VA_MODE::BARE;
|
||||
#else // 64 bit
|
||||
this->mode = satp & (1<< SATP_MODE_IDX) ? VA_MODE::SV64 : VA_MODE::BARE;
|
||||
#endif
|
||||
}
|
||||
#endif
|
|
@ -34,13 +34,14 @@ namespace vortex {
|
|||
#ifdef VM_ENABLE
|
||||
enum VA_MODE {
|
||||
BARE,
|
||||
SV32
|
||||
SV32,
|
||||
SV64
|
||||
};
|
||||
|
||||
enum ACCESS_TYPE {
|
||||
LOAD,
|
||||
STORE,
|
||||
FETCH
|
||||
FENCE
|
||||
};
|
||||
|
||||
class Page_Fault_Exception : public std::runtime_error /* or logic_error */
|
||||
|
@ -117,7 +118,7 @@ public:
|
|||
};
|
||||
|
||||
#ifdef VM_ENABLE
|
||||
MemoryUnit(uint64_t pageSize = PAGE_TABLE_SIZE);
|
||||
MemoryUnit(uint64_t pageSize = MEM_PAGE_SIZE);
|
||||
#else
|
||||
MemoryUnit(uint64_t pageSize = 0);
|
||||
#endif
|
||||
|
@ -138,8 +139,8 @@ public:
|
|||
|
||||
#ifdef VM_ENABLE
|
||||
void tlbAdd(uint64_t virt, uint64_t phys, uint32_t flags, uint64_t size_bits);
|
||||
uint32_t get_satp();
|
||||
void set_satp(uint32_t satp);
|
||||
uint64_t get_satp();
|
||||
void set_satp(uint64_t satp);
|
||||
#else
|
||||
void tlbAdd(uint64_t virt, uint64_t phys, uint32_t flags);
|
||||
#endif
|
||||
|
@ -238,14 +239,16 @@ private:
|
|||
std::unordered_map<uint64_t, TLBEntry> tlb_;
|
||||
uint64_t pageSize_;
|
||||
ADecoder decoder_;
|
||||
#ifndef VM_ENABLE
|
||||
bool enableVM_;
|
||||
#endif
|
||||
|
||||
amo_reservation_t amo_reservation_;
|
||||
#ifdef VM_ENABLE
|
||||
|
||||
uint32_t satp;
|
||||
uint64_t satp;
|
||||
VA_MODE mode;
|
||||
uint32_t ptbr;
|
||||
uint64_t ptbr;
|
||||
|
||||
std::unordered_set<uint64_t> unique_translations;
|
||||
uint64_t TLB_HIT, TLB_MISS, TLB_EVICT, PTW, PERF_UNIQUE_PTW;
|
||||
|
@ -380,7 +383,7 @@ class vAddr_SV32_t
|
|||
vpn[0] = bits(address,12,21);
|
||||
vpn[1] = bits(address,22,31);
|
||||
pgoff = bits(address,0,11);
|
||||
// printf("vpn[0] = 0x%lx, vpn[1] = 0x%lx, pgoff = 0x%lx\n",vpn[0],vpn[1],pgoff);
|
||||
// printf("vpn[1] = 0x%lx, vpn[0] = 0x%lx, pgoff = 0x%lx\n",vpn[1],vpn[0],pgoff);
|
||||
}
|
||||
};
|
||||
#endif
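To show what the vAddr_SV32_t constructor above computes, a small standalone example that splits one 32-bit address into its two VPN fields and the page offset. bits() is a plausible re-implementation of the runtime helper, and the address is arbitrary.

#include <cstdint>
#include <cstdio>

// Inclusive bit-range extraction, [s..e].
uint64_t bits(uint64_t v, uint8_t s, uint8_t e) {
  return (v >> s) & ((1ull << (e - s + 1)) - 1);
}

int main() {
  uint64_t vaddr = 0xF1234ABC;          // example 32-bit virtual address
  uint64_t vpn1  = bits(vaddr, 22, 31); // 10-bit index into the root table
  uint64_t vpn0  = bits(vaddr, 12, 21); // 10-bit index into the leaf table
  uint64_t pgoff = bits(vaddr, 0, 11);  // 12-bit offset within the 4 KB page
  printf("vpn[1]=0x%lx vpn[0]=0x%lx pgoff=0x%lx\n", vpn1, vpn0, pgoff);
  return 0;
}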
|
||||
|
|
|
@ -107,7 +107,7 @@ void Cluster::attach_ram(RAM* ram) {
|
|||
}
|
||||
|
||||
#ifdef VM_ENABLE
|
||||
void Cluster::set_satp(uint32_t satp) {
|
||||
void Cluster::set_satp(uint64_t satp) {
|
||||
for (auto& socket : sockets_) {
|
||||
socket->set_satp(satp);
|
||||
}
|
||||
|
|
|
@ -58,7 +58,7 @@ public:
|
|||
void attach_ram(RAM* ram);
|
||||
|
||||
#ifdef VM_ENABLE
|
||||
void set_satp(uint32_t satp);
|
||||
void set_satp(uint64_t satp);
|
||||
#endif
|
||||
|
||||
bool running() const;
|
||||
|
|
|
@ -398,7 +398,7 @@ void Core::attach_ram(RAM* ram) {
|
|||
}
|
||||
|
||||
#ifdef VM_ENABLE
|
||||
void Core::set_satp(uint32_t satp) {
|
||||
void Core::set_satp(uint64_t satp) {
|
||||
emulator_.set_satp(satp); //JAEWON wit, tid???
|
||||
// emulator_.set_csr(VX_CSR_SATP,satp,0,0); //JAEWON wit, tid???
|
||||
}
|
||||
|
|
|
@ -98,7 +98,7 @@ public:
|
|||
|
||||
void attach_ram(RAM* ram);
|
||||
#ifdef VM_ENABLE
|
||||
void set_satp(uint32_t satp);
|
||||
void set_satp(uint64_t satp);
|
||||
#endif
|
||||
|
||||
bool running() const;
|
||||
|
|
|
@ -270,7 +270,7 @@ bool Emulator::barrier(uint32_t bar_id, uint32_t count, uint32_t wid) {
|
|||
|
||||
#ifdef VM_ENABLE
|
||||
void Emulator::icache_read(void *data, uint64_t addr, uint32_t size) {
|
||||
DPH(3, "*** icache_read 0x" << std::hex << addr << ", size = 0x " << size);
|
||||
// DP(1, "*** icache_read 0x" << std::hex << addr << ", size = 0x " << size);
|
||||
|
||||
try
|
||||
{
|
||||
|
@ -289,7 +289,7 @@ void Emulator::icache_read(void *data, uint64_t addr, uint32_t size) {
|
|||
#endif
|
||||
|
||||
#ifdef VM_ENABLE
|
||||
void Emulator::set_satp(uint32_t satp) {
|
||||
void Emulator::set_satp(uint64_t satp) {
|
||||
DPH(3, "set satp 0x" << std::hex << satp << " in emulator module\n");
|
||||
set_csr(VX_CSR_SATP,satp,0,0);
|
||||
}
|
||||
|
@ -298,6 +298,7 @@ void Emulator::set_satp(uint32_t satp) {
|
|||
|
||||
#ifdef VM_ENABLE
|
||||
void Emulator::dcache_read(void *data, uint64_t addr, uint32_t size) {
|
||||
DP(1, "*** dcache_read 0x" << std::hex << addr << ", size = 0x " << size);
|
||||
auto type = get_addr_type(addr);
|
||||
if (type == AddrType::Shared) {
|
||||
core_->local_mem()->read(data, addr, size);
|
||||
|
|
|
@ -40,7 +40,7 @@ public:
|
|||
|
||||
void attach_ram(RAM* ram);
|
||||
#ifdef VM_ENABLE
|
||||
void set_satp(uint32_t satp) ;
|
||||
void set_satp(uint64_t satp) ;
|
||||
#endif
|
||||
|
||||
instr_trace_t* step();
|
||||
|
|
|
@ -83,7 +83,7 @@ void ProcessorImpl::attach_ram(RAM* ram) {
|
|||
}
|
||||
}
|
||||
#ifdef VM_ENABLE
|
||||
void ProcessorImpl::set_satp(uint32_t satp) {
|
||||
void ProcessorImpl::set_satp(uint64_t satp) {
|
||||
for (auto cluster : clusters_) {
|
||||
cluster->set_satp(satp);
|
||||
}
|
||||
|
@ -151,12 +151,12 @@ void Processor::dcr_write(uint32_t addr, uint32_t value) {
|
|||
}
|
||||
|
||||
#ifdef VM_ENABLE
|
||||
uint32_t Processor::get_satp() {
|
||||
uint64_t Processor::get_satp() {
|
||||
// std::cout << "get SATP: 0x" << std::hex << this->satp << std::endl;
|
||||
return this->satp;
|
||||
}
|
||||
|
||||
void Processor::set_satp(uint32_t satp) {
|
||||
void Processor::set_satp(uint64_t satp) {
|
||||
impl_->set_satp(satp);
|
||||
this->satp = satp;
|
||||
}
|
||||
|
|
|
@ -34,14 +34,14 @@ public:
|
|||
|
||||
void dcr_write(uint32_t addr, uint32_t value);
|
||||
#ifdef VM_ENABLE
|
||||
uint32_t get_satp();
|
||||
void set_satp(uint32_t satp);
|
||||
uint64_t get_satp();
|
||||
void set_satp(uint64_t satp);
|
||||
#endif
|
||||
|
||||
private:
|
||||
ProcessorImpl* impl_;
|
||||
#ifdef VM_ENABLE
|
||||
uint32_t satp;
|
||||
uint64_t satp;
|
||||
#endif
|
||||
};
|
||||
|
||||
|
|
|
@ -40,8 +40,7 @@ public:
|
|||
void dcr_write(uint32_t addr, uint32_t value);
|
||||
|
||||
#ifdef VM_ENABLE
|
||||
// 32bit satp
|
||||
void set_satp(uint32_t satp);
|
||||
void set_satp(uint64_t satp);
|
||||
#endif
|
||||
|
||||
PerfStats perf_stats() const;
|
||||
|
|
|
@ -108,7 +108,7 @@ void Socket::attach_ram(RAM* ram) {
|
|||
}
|
||||
|
||||
#ifdef VM_ENABLE
|
||||
void Socket::set_satp(uint32_t satp) {
|
||||
void Socket::set_satp(uint64_t satp) {
|
||||
for (auto core : cores_) {
|
||||
core->set_satp(satp);
|
||||
}
|
||||
|
|
|
@ -61,7 +61,7 @@ public:
|
|||
void attach_ram(RAM* ram);
|
||||
|
||||
#ifdef VM_ENABLE
|
||||
void set_satp(uint32_t satp);
|
||||
void set_satp(uint64_t satp);
|
||||
#endif
|
||||
|
||||
bool running() const;
|
||||
|
|