64bit support

Jaewon Lee 2024-06-29 17:43:20 -04:00 committed by Hanran Wu
parent e21bf9afbd
commit 3a5278a62e
12 changed files with 572 additions and 443 deletions

.gitignore vendored
View file

@@ -1,3 +1,4 @@
/build*
/.vscode
*.cache
*.code-workspace

View file

@@ -163,7 +163,7 @@
`endif
`ifndef STARTUP_ADDR
-`define STARTUP_ADDR 64'h080000000
`define STARTUP_ADDR 64'h180000000
`endif
`ifndef USER_BASE_ADDR
@@ -270,59 +270,59 @@
`define DEBUG_LEVEL 3
`endif
`ifndef MEM_PAGE_SIZE
`define MEM_PAGE_SIZE (4096)
`endif
`ifndef MEM_PAGE_LOG2_SIZE
`define MEM_PAGE_LOG2_SIZE (12)
`endif
// Virtual Memory Configuration ///////////////////////////////////////////////////////
`ifdef VM_ENABLE
`ifdef XLEN_32
`ifndef VM_ADDR_MODE
`define VM_ADDR_MODE SV32 //or BARE
`endif
`ifndef PT_LEVEL
`define PT_LEVEL (2)
`endif
`ifndef PTE_SIZE
`define PTE_SIZE (4)
`endif
-`ifndef SATP_MODE_IDX
-`define SATP_MODE_IDX (31)
`ifndef NUM_PTE_ENTRY
`define NUM_PTE_ENTRY (1024)
`endif
-`ifndef SATP_PPN_WIDTH
-`define SATP_PPN_WIDTH (22)
`ifndef PT_SIZE_LIMIT
`define PT_SIZE_LIMIT (1<<23)
`endif
`else
`ifndef VM_ADDR_MODE
-`define VM_ADDR_MODE SV64 //or BARE
`define VM_ADDR_MODE SV39 //or BARE
`endif
`ifndef PT_LEVEL
`define PT_LEVEL (3)
`endif
`ifndef PTE_SIZE
`define PTE_SIZE (8)
`endif
-`ifndef SATP_MODE_IDX
-`define SATP_MODE_IDX (63)
`ifndef NUM_PTE_ENTRY
`define NUM_PTE_ENTRY (512)
`endif
-`ifndef SATP_PPN_WIDTH
-`define SATP_PPN_WIDTH (44)
`ifndef PT_SIZE_LIMIT
`define PT_SIZE_LIMIT (1<<25)
`endif
`endif
-`ifndef NUM_PTE_ENTRY
-`define NUM_PTE_ENTRY (1024)
-`endif
`ifndef PT_SIZE
-`define PT_SIZE (PTE_SIZE * NUM_PTE_ENTRY)
`define PT_SIZE MEM_PAGE_SIZE
`endif
`ifndef PT_TOTAL_SIZE
`define PT_TOTAL_SIZE (PT_SIZE*(1+NUM_PTE_ENTRY))
`endif
`ifndef TLB_SIZE
`define TLB_SIZE (32)
`endif
`endif
-`ifndef MEM_PAGE_SIZE
-`define MEM_PAGE_SIZE (4096)
-`endif
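For reference, the Sv39 defaults introduced above are mutually consistent: 512 eight-byte PTEs fill exactly one 4 KiB page (which is why PT_SIZE collapses to MEM_PAGE_SIZE), and three levels of 9-bit VPN indices plus the 12-bit page offset yield the 39-bit virtual address. A standalone sketch of those relationships (values copied from the defines above; not part of the commit):

#include <cstdint>

int main() {
  constexpr uint64_t MEM_PAGE_SIZE = 4096;
  constexpr uint64_t PTE_SIZE      = 8;    // Sv39 PTEs are 64-bit
  constexpr uint64_t NUM_PTE_ENTRY = 512;
  constexpr uint64_t PT_LEVEL      = 3;
  static_assert(PTE_SIZE * NUM_PTE_ENTRY == MEM_PAGE_SIZE,
                "one table level fits exactly in one page (PT_SIZE == MEM_PAGE_SIZE)");
  static_assert(PT_LEVEL * 9 + 12 == 39,
                "3 x 9 VPN bits + 12 offset bits = 39-bit Sv39 VA");
  return 0;
}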
// Pipeline Configuration /////////////////////////////////////////////////////
// Issue width

View file

@@ -24,7 +24,7 @@
#define CACHE_BLOCK_SIZE 64
-#define RAM_PAGE_SIZE 4096
#define RAM_PAGE_SIZE 4096 // Please use MEM_PAGE_SIZE in VX_config.h
#define ALLOC_BASE_ADDR USER_BASE_ADDR

View file

@@ -39,6 +39,15 @@ public:
page_t* currPage = pages_;
while (currPage) {
auto nextPage = currPage->next;
#ifdef VM_ENABLE
block_t* currblock = currPage->findfirstUsedBlock();
block_t* nextblock;
while (currblock) {
nextblock= currblock->nextUsed;
currPage->release(currblock);
currblock = nextblock;
}
#endif
delete currPage;
currPage = nextPage;
}
@@ -70,7 +79,7 @@ public:
size = alignSize(size, pageAlign_);
// Check if the reservation is within memory capacity bounds
-if (addr + size > capacity_) {
if (addr + size > baseAddress_ + capacity_) {
printf("error: address range out of bounds\n");
return -1;
}
@@ -118,12 +127,12 @@ public:
auto pageSize = alignSize(size, pageAlign_);
uint64_t pageAddr;
if (!this->findNextAddress(pageSize, &pageAddr)) {
printf("error: out of memory\n");
printf("error: out of memory (Can't find next address)\n");
return -1;
}
currPage = this->createPage(pageAddr, pageSize);
if (nullptr == currPage) {
printf("error: out of memory\n");
printf("error: out of memory (Can't create a page)\n");
return -1;
}
freeBlock = currPage->findFreeBlock(size);
@@ -335,6 +344,11 @@ private:
}
return nullptr;
}
#ifdef VM_ENABLE
block_t* findfirstUsedBlock() {
return usedList_;
}
#endif
private:
@@ -480,7 +494,7 @@ private:
bool findNextAddress(uint64_t size, uint64_t* addr) {
if (pages_ == nullptr) {
*addr = baseAddress_;
return true;
}
@@ -498,10 +512,10 @@ private:
endOfLastPage = current->addr + current->size;
current = current->next;
}
-// If no suitable gap is found, place the new page at the end of the last page
// Check if the allocator has enough capacity
-if ((endOfLastPage + size) <= capacity_) {
if ((endOfLastPage + size) <= (baseAddress_ + capacity_)) {
*addr = endOfLastPage;
return true;
}
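Both bound checks in this file changed for the same reason: addr and endOfLastPage are absolute device addresses, so once baseAddress_ is non-zero they must be compared against baseAddress_ + capacity_ rather than capacity_ alone. A minimal illustration with assumed numbers:

#include <cstdint>
#include <cstdio>

// True if a block of `size` bytes at absolute address `addr` still ends
// inside the allocator window [base, base + capacity].
static bool in_bounds(uint64_t base, uint64_t capacity,
                      uint64_t addr, uint64_t size) {
  return addr + size <= base + capacity;
}

int main() {
  const uint64_t base = 0x80000000, capacity = 1ull << 20; // assumed 1 MiB arena
  // The old form (addr + size <= capacity) would reject this valid placement,
  // since the absolute address alone already exceeds the capacity value.
  printf("%d\n", in_bounds(base, capacity, base + 0xFF000, 0x1000)); // prints 1
  printf("%d\n", in_bounds(base, capacity, base + 0xFF000, 0x2000)); // prints 0
  return 0;
}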

View file

@@ -27,10 +27,8 @@
#include <future>
#include <chrono>
#ifdef VM_ENABLE
-#include <VX_config.h>
-// #include <vortex.h>
-//#include <utils.h>
#include <malloc.h>
#include <VX_types.h>
@@ -44,42 +42,10 @@
#include <unordered_map>
#include <array>
#include <cmath>
#include <cassert>
#endif
using namespace vortex;
#ifdef VM_ENABLE
#ifndef NDEBUG
#define DBGPRINT(format, ...) do { printf("[VXDRV] " format "", ##__VA_ARGS__); } while (0)
#else
#define DBGPRINT(format, ...) ((void)0)
#endif
#define CHECK_ERR(_expr, _cleanup) \
do { \
auto err = _expr; \
if (err == 0) \
break; \
printf("[VXDRV] Error: '%s' returned %d!\n", #_expr, (int)err); \
_cleanup \
} while (false)
///////////////////////////////////////////////////////////////////////////////
//
#include <bitset>
#include <unistd.h>
uint64_t bits(uint64_t addr, uint8_t s_idx, uint8_t e_idx)
{
return (addr >> s_idx) & ((1 << (e_idx - s_idx + 1)) - 1);
}
bool bit(uint64_t addr, uint8_t idx)
{
return (addr) & (1 << idx);
}
#endif
class vx_device {
public:
vx_device()
@@ -91,14 +57,16 @@ public:
// attach memory module
processor_.attach_ram(&ram_);
#ifdef VM_ENABLE
-//Set
-set_processor_satp(VM_ADDR_MODE);
CHECK_ERR(init_VM(), );
#endif
}
~vx_device() {
#ifdef VM_ENABLE
-this->mem_free(PAGE_TABLE_BASE_ADDR); // Right position?
global_mem_.release(PAGE_TABLE_BASE_ADDR);
// for (auto i = addr_mapping.begin(); i != addr_mapping.end(); i++)
//   page_table_mem_->release(i->second << MEM_PAGE_SIZE);
delete page_table_mem_;
#endif
if (future_.valid()) {
future_.wait();
@@ -154,9 +122,10 @@ public:
bool need_trans(uint64_t dev_pAddr)
{
// Check if this is BARE mode
-bool isBAREMode = (get_mode() == VA_MODE::BARE);
bool isBAREMode = (get_mode() == BARE);
// Check if the address is reserved for system usage
-bool isReserved = (dev_pAddr >= PAGE_TABLE_BASE_ADDR);
// bool isReserved = (PAGE_TABLE_BASE_ADDR <= dev_pAddr && dev_pAddr < PAGE_TABLE_BASE_ADDR + PT_SIZE_LIMIT);
bool isReserved = (PAGE_TABLE_BASE_ADDR <= dev_pAddr);
// Check if the address is reserved for IO usage
bool isIO = (dev_pAddr < USER_BASE_ADDR);
// Check if the address falls within the startup address range
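Condensed, the rule need_trans implements on both the runtime and simulator sides is: translate only ordinary user addresses. A sketch with assumed constants (the startup-address clause is cut off by this hunk and omitted here):

#include <cstdint>
#include <cstdio>

// Assumed layout constants; the real values come from VX_config.h.
constexpr uint64_t USER_BASE_ADDR       = 0x00010000;
constexpr uint64_t PAGE_TABLE_BASE_ADDR = 0xF0000000;

// Skip translation in BARE mode, for the reserved page-table arena,
// and for the IO window below USER_BASE_ADDR.
static bool need_trans_sketch(bool bare_mode, uint64_t dev_pAddr) {
  bool isReserved = (PAGE_TABLE_BASE_ADDR <= dev_pAddr);
  bool isIO       = (dev_pAddr < USER_BASE_ADDR);
  return !bare_mode && !isReserved && !isIO;
}

int main() {
  printf("%d %d %d\n",
         need_trans_sketch(false, 0x80000000),   // 1: user address
         need_trans_sketch(false, 0x00000100),   // 0: IO window
         need_trans_sketch(false, 0xF0001000));  // 0: page-table arena
  return 0;
}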
@@ -172,14 +141,12 @@ public:
uint64_t phy_to_virt_map(uint64_t size, uint64_t *dev_pAddr, uint32_t flags)
{
// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
DBGPRINT(" [RT:PTV_MAP] size = 0x%lx, dev_pAddr= 0x%lx, flags = 0x%x\n", size, *dev_pAddr, flags);
DBGPRINT(" [RT:PTV_MAP] bit mode: %d\n", XLEN);
// if (*dev_pAddr == STARTUP_ADDR || *dev_pAddr == 0x7FFFF000) {
DBGPRINT(" [RT:PTV_MAP] size = 0x%lx, dev_pAddr= 0x%lx, flags = 0x%x\n", size, *dev_pAddr, flags);
DBGPRINT(" [RT:PTV_MAP] bit mode: %d\n", XLEN);
if (!need_trans(*dev_pAddr))
{
DBGPRINT(" [RT:PTV_MAP] Translation is not needed.\n");
DBGPRINT(" [RT:PTV_MAP] Translation is not needed.\n");
return 0;
}
@@ -189,42 +156,30 @@ public:
// dev_pAddr can be of size greater than a page, but we have to map and update
// page tables on a page table granularity. So divide the allocation into pages.
-bool is_start = false;
-for (ppn = (*dev_pAddr) >> 12; ppn < ((*dev_pAddr) >> 12) + (size / MEM_PAGE_SIZE) + 1; ppn++)
// FUTURE Work: Super Page
for (ppn = (*dev_pAddr >> MEM_PAGE_LOG2_SIZE); ppn < ((*dev_pAddr) >> MEM_PAGE_LOG2_SIZE) + (size >> MEM_PAGE_LOG2_SIZE); ppn++)
{
-vpn = map_p2v(ppn << 12) >> 12;
-if (is_start == false)
-{
-DBGPRINT(" [RT:PTV_MAP] Search vpn in page table:0x%lx\n", vpn);
-is_start = true;
-}
-else
-{
-DBGPRINT(" [RT:PTV_MAP] Next vpn: 0x%lx\n", vpn);
-}
vpn = map_p2v(ppn << MEM_PAGE_LOG2_SIZE) >> MEM_PAGE_LOG2_SIZE;
DBGPRINT(" [RT:PTV_MAP] Search vpn in page table:0x%lx\n", vpn);
// Currently a 1-1 mapping is used, this can be changed here to support different
// mapping schemes
// If the ppn-to-vpn mapping doesn't exist.
if (addr_mapping.find(vpn) == addr_mapping.end())
{
// Create mapping.
-update_page_table(ppn, vpn, flags);
DBGPRINT(" [RT:PTV_MAP] Not found. Allocate new page table or update a PTE.\n");
CHECK_ERR(update_page_table(ppn, vpn, flags),);
addr_mapping[vpn] = ppn;
}
}
DBGPRINT(" [RT:PTV_MAP] Mapped virtual addr: 0x%lx to physical addr: %lx\n", init_vAddr, init_pAddr);
DBGPRINT(" [RT:PTV_MAP] Mapped virtual addr: 0x%lx to physical addr: 0x%lx\n", init_vAddr, init_pAddr);
// Sanity check
uint64_t pAddr = page_table_walk(init_vAddr);
-if (pAddr != init_pAddr)
-{
-assert(pAddr == init_pAddr && "ERROR: translated virtual Addresses are not the same with physical Address");
-}
DBGPRINT(" [RT:PTV_MAP] physical addr from PTW: 0x%lx\n", pAddr);
assert(pAddr == init_pAddr && "ERROR: translated virtual address does not match the physical address\n");
*dev_pAddr = init_vAddr; // commit vpn to be returned to host
DBGPRINT(" [RT:PTV_MAP] Translated device virtual addr: 0x%lx\n", *dev_pAddr);
DBGPRINT(" [RT:PTV_MAP] Translated device virtual addr: 0x%lx\n", *dev_pAddr);
return 0;
}
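The rewritten loop maps an allocation one page at a time; since the caller now passes a page-aligned asize, the bound iterates exactly size / MEM_PAGE_SIZE times instead of overshooting by one page as the old bound did. A sketch of the page-range arithmetic, with the same 1:1 physical-to-virtual mapping the code currently uses:

#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint64_t MEM_PAGE_LOG2_SIZE = 12;  // 4 KiB pages
  uint64_t dev_pAddr = 0x80010000;             // assumed, page-aligned
  uint64_t size      = 0x3000;                 // 3 pages, already aligned
  for (uint64_t ppn = dev_pAddr >> MEM_PAGE_LOG2_SIZE;
       ppn < (dev_pAddr >> MEM_PAGE_LOG2_SIZE) + (size >> MEM_PAGE_LOG2_SIZE);
       ++ppn) {
    uint64_t vpn = ppn;  // 1:1 mapping, as map_p2v does today
    printf("map vpn 0x%llx -> ppn 0x%llx\n",
           (unsigned long long)vpn, (unsigned long long)ppn);
  }
  return 0;  // prints exactly three mappings
}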
@@ -232,47 +187,44 @@ public:
int mem_alloc(uint64_t size, int flags, uint64_t *dev_addr)
{
uint64_t asize = aligned_size(size, MEM_PAGE_SIZE);
-uint64_t addr = 0;
uint64_t addr;
-DBGPRINT(" [RT:mem_alloc] mem_alloc size: 0x%lx\n", size);
-CHECK_ERR(global_mem_.allocate(size, &addr), {
DBGPRINT("[RT:mem_alloc] size: 0x%lx, asize: 0x%lx, flags: 0x%x\n", size, asize, flags);
CHECK_ERR(global_mem_.allocate(asize, &addr), {
return err;
});
-CHECK_ERR(this->mem_access(addr, size, flags), {
CHECK_ERR(this->mem_access(addr, asize, flags), {
global_mem_.release(addr);
return err;
});
*dev_addr = addr;
#ifdef VM_ENABLE
// VM address translation
-phy_to_virt_map(size, dev_addr, flags);
phy_to_virt_map(asize, dev_addr, flags);
#endif
return 0;
}
int mem_reserve(uint64_t dev_addr, uint64_t size, int flags)
{
-CHECK_ERR(global_mem_.reserve(dev_addr, size), {
uint64_t asize = aligned_size(size, MEM_PAGE_SIZE);
CHECK_ERR(global_mem_.reserve(dev_addr, asize), {
return err;
});
DBGPRINT(" [RT:mem_reserve] mem_reserve: addr: 0x%lx, size: 0x%lx\n", dev_addr, size);
CHECK_ERR(this->mem_access(dev_addr, size, flags), {
DBGPRINT("[RT:mem_reserve] addr: 0x%lx, asize:0x%lx, size: 0x%lx\n", dev_addr, asize, size);
CHECK_ERR(this->mem_access(dev_addr, asize, flags), {
global_mem_.release(dev_addr);
return err;
});
#ifdef VM_ENABLE
uint64_t paddr = dev_addr;
phy_to_virt_map(size, &paddr, flags);
#endif
return 0;
}
int mem_free(uint64_t dev_addr)
{
#ifdef VM_ENABLE
-uint64_t pAddr = page_table_walk(dev_addr);
-// VM address translation
-return global_mem_.release(pAddr);
uint64_t paddr = page_table_walk(dev_addr);
return global_mem_.release(paddr);
#else
return global_mem_.release(dev_addr);
#endif
@@ -313,8 +265,8 @@ public:
ram_.write((const uint8_t *)src, dest_addr, size);
ram_.enable_acl(true);
/*DBGPRINT("upload %ld bytes to 0x%lx\n", size, dest_addr);
/*
DBGPRINT("upload %ld bytes to 0x%lx\n", size, dest_addr);
for (uint64_t i = 0; i < size && i < 1024; i += 4) {
DBGPRINT(" 0x%lx <- 0x%x\n", dest_addr + i, *(uint32_t*)((uint8_t*)src + i));
}*/
@@ -418,200 +370,195 @@ public:
*value = mpm_cache_.at(core_id).at(offset);
return 0;
}
#ifdef VM_ENABLE
/* VM Management */
-void set_processor_satp(VA_MODE mode)
-{
-// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
-uint64_t satp = 0;
-if (mode == VA_MODE::BARE)
-{
-DBGPRINT(" [RT:set_satp] VA_MODE = BARE MODE");
-}
-else
-{
-satp = (alloc_2nd_level_page_table() / MEM_PAGE_SIZE) | (1 << SATP_MODE_IDX);
-DBGPRINT(" [RT:set_satp] VA_MODE = SV mode (satp = 0x%lx)\n", satp);
-}
-processor_.set_satp(satp);
-}
-uint64_t get_ptbr()
-{
-// return processor_.get_satp();
-return processor_.get_satp() & ((1 << SATP_PPN_WIDTH) - 1);
-}
-uint64_t get_pte_address(uint64_t base_page, uint64_t vpn)
-{
-return (base_page * MEM_PAGE_SIZE) + (vpn * PTE_SIZE);
-}
-VA_MODE get_mode()
-{
-#ifdef XLEN_32
-return processor_.get_satp() & (1 << SATP_MODE_IDX) ? VA_MODE::SV32 : VA_MODE::BARE;
-#else // 64 bit
-return processor_.get_satp() & (1 << SATP_MODE_IDX) ? VA_MODE::SV64 : VA_MODE::BARE;
-#endif
-}
-void update_page_table(uint64_t ppn, uint64_t vpn, uint32_t flag)
-{
-// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
-DBGPRINT(" [RT:Update PT] Mapping vpn 0x%05lx to ppn 0x%05lx(flags = %u)\n", vpn, ppn, flag);
-assert((((ppn >> 20) == 0) && ((vpn >> 20) == 0)) && "Upper 12 bits are not zero!");
-// Updating page table with the following mapping of (vAddr) to (pAddr).
-// uint32_t page_bit_shift = log2ceil(PTE_SIZE*NUM_PTE_ENTRY);
-uint64_t ppn_1 = 0, pte_addr = 0, pte_bytes = 0;
-uint64_t vpn_1 = bits(vpn, 10, 19);
-uint64_t vpn_0 = bits(vpn, 0, 9);
-// Read first level PTE.
-DBGPRINT(" [RT:Update PT]Start second-level page table\n");
-pte_addr = get_pte_address(get_ptbr(), vpn_1);
-pte_bytes = read_pte(pte_addr);
-DBGPRINT(" [RT:Update PT] PTE addr 0x%lx, PTE bytes 0x%lx\n", pte_addr, pte_bytes);
-ppn_1 = (pte_bytes >> 10);
-if (bit(pte_bytes, 0) && ((pte_bytes & 0xFFFFFFFF) != 0xbaadf00d))
-{
-// If valid bit set, proceed to next level using new ppn form PTE.
-DBGPRINT(" [RT:Update PT] PTE valid (ppn 0x%lx), continuing the walk...\n", ppn_1);
-}
-else
-{
-// If valid bit not set, allocate a second level page table
-// in device memory and store ppn in PTE. Set rwx = 000 in PTE
-// to indicate this is a pointer to the next level of the page table.
-DBGPRINT(" [RT:Update PT] PTE Invalid (ppn 0x%lx), continuing the walk...\n", ppn_1);
-ppn_1 = (alloc_1st_level_page_table(vpn_1) >> 12);
-pte_bytes = ((ppn_1 << 10) | 0b0000000001);
-assert((pte_addr >> 32) == 0 && "Upper 32 bits are not zero!");
-write_pte(pte_addr, pte_bytes);
-// if (pte_bytes != read_pte(pte_addr))
-// DBGPRINT("Read/write values are different!\n");
-}
-DBGPRINT(" [RT:Update PT] Move to first-level page table\n");
-// Read second level PTE.
-pte_addr = get_pte_address(ppn_1, vpn_0);
-pte_bytes = read_pte(pte_addr);
-if (bit(pte_bytes, 0) && ((pte_bytes & 0xFFFFFFFF) != 0xbaadf00d))
-{
-DBGPRINT(" [RT:Update PT] ERROR, shouldn't be here\n");
-exit(1);
-// If valid bit is set, then the page is already allocated.
-// Should not reach this point, a sanity check.
-}
-else
-{
-// If valid bit not set, write ppn of pAddr in PTE. Set rwx = 111 in PTE
-// to indicate this is a leaf PTE and has the stated permissions.
-pte_bytes = ((ppn << 10) | 0b0000001111);
-write_pte(pte_addr, pte_bytes);
-if (pte_bytes != read_pte(pte_addr))
-DBGPRINT(" [RT:Update PT] PTE write value and read value are not matched!\n");
-}
-}
-uint64_t page_table_walk(uint64_t vAddr_bits)
-{
-// DBGPRINT("====%s====\n", __PRETTY_FUNCTION__);
-DBGPRINT(" [RT:PTW] start vAddr: 0x%lx\n", vAddr_bits);
-if (!need_trans(vAddr_bits))
-{
-DBGPRINT(" [RT:PTW] Translation is not needed.\n");
-return vAddr_bits;
-}
-uint64_t LEVELS = 2;
-vAddr_SV32_t vAddr(vAddr_bits);
-uint64_t pte_addr, pte_bytes;
-uint64_t pt_ba = get_ptbr() << 12;
-// Get base page table.
-for (int i = LEVELS - 1; i >= 0; i--)
-{
-// Read PTE.
-pte_addr = pt_ba + vAddr.vpn[i] * PTE_SIZE;
-pte_bytes = read_pte(pte_addr);
-PTE_SV32_t pte(pte_bytes);
-DBGPRINT(" [RT:PTW] Level[%u] pte_bytes = 0x%lx, pte flags = %u)\n", i, pte.ppn, pte.flags);
-// Check if it has invalid flag bits.
-if ((pte.v == 0) | ((pte.r == 0) & (pte.w == 1)))
-{
-std::string msg = " [RT:PTW] Page Fault : Attempted to access invalid entry. Entry: 0x";
-throw Page_Fault_Exception(msg);
-}
-if ((pte.r == 0) & (pte.w == 0) & (pte.x == 0))
-{
-// Not a leaf node as rwx == 000
-if (i == 0)
-{
-throw Page_Fault_Exception(" [RT:PTW] Page Fault : No leaf node found.");
-}
-else
-{
-// Continue on to next level.
-pt_ba = pte.ppn << 12;
-DBGPRINT(" [RT:PTW] next pt_ba: %p\n", (void *)pt_ba);
-}
-}
-else
-{
-// Leaf node found, finished walking.
-pt_ba = pte.ppn << 12;
-DBGPRINT(" [RT:PTW] Found PT_Base_Address [%d] = %lx\n", i, pt_ba);
-break;
-}
-}
-// pte_bytes is final leaf
-PTE_SV32_t pte(pte_bytes);
-// Check RWX permissions according to access type.
-if (pte.r == 0)
-{
-throw Page_Fault_Exception(" [RT:PTW] Page Fault : TYPE LOAD, Incorrect permissions.");
-}
-uint64_t paddr = pt_ba + vAddr.pgoff;
-return paddr;
-}
-uint64_t alloc_2nd_level_page_table()
-{
-uint64_t addr = PAGE_TABLE_BASE_ADDR;
-uint64_t size = PT_TOTAL_SIZE;
-CHECK_ERR(this->mem_reserve(addr, size, VX_MEM_READ_WRITE), {
-return err;
-});
-init_page_table(addr);
-return addr;
-}
-uint64_t alloc_1st_level_page_table(uint64_t vpn_1)
-{
-uint64_t addr = PAGE_TABLE_BASE_ADDR + PT_SIZE * (1 + vpn_1);
-init_page_table(addr);
-return addr;
-}
// Initialize to zero the target page table area. 32bit 4K, 64bit 8K
-void init_page_table(uint64_t addr)
uint16_t init_page_table(uint64_t addr, uint64_t size)
{
-uint64_t asize = aligned_size(PT_SIZE, CACHE_BLOCK_SIZE);
uint64_t asize = aligned_size(size, CACHE_BLOCK_SIZE);
DBGPRINT(" [RT:init_page_table] (addr=0x%lx, size=0x%lx)\n", addr, asize);
uint8_t *src = new uint8_t[asize];
-for (uint64_t i = 0; i < PT_SIZE; ++i)
if (src == NULL)
return 1;
for (uint64_t i = 0; i < asize; ++i)
{
src[i] = 0;
}
ram_.enable_acl(false);
ram_.write((const uint8_t *)src, addr, asize);
ram_.enable_acl(true);
return 0;
}
uint8_t alloc_page_table (uint64_t * pt_addr)
{
CHECK_ERR(page_table_mem_->allocate(PT_SIZE, pt_addr), { return err; });
CHECK_ERR(init_page_table(*pt_addr, PT_SIZE), { return err; });
DBGPRINT(" [RT:alloc_page_table] addr= 0x%lx\n", *pt_addr);
return 0;
}
int16_t init_VM()
{
uint64_t pt_addr = 0;
// Reserve space for PT
DBGPRINT("[RT:init_VM] Initialize VM\n");
CHECK_ERR(mem_reserve(PAGE_TABLE_BASE_ADDR, PT_SIZE_LIMIT, VX_MEM_READ_WRITE), {
return err;
});
page_table_mem_ = new MemoryAllocator (PAGE_TABLE_BASE_ADDR, PT_SIZE_LIMIT, MEM_PAGE_SIZE, CACHE_BLOCK_SIZE);
if (page_table_mem_ == NULL)
{
CHECK_ERR(this->mem_free(PAGE_TABLE_BASE_ADDR),);
return 1;
}
if (VM_ADDR_MODE == BARE)
DBGPRINT("[RT:init_VM] VA_MODE = BARE MODE(addr= 0x0)");
else
CHECK_ERR(alloc_page_table(&pt_addr),{return err;});
CHECK_ERR(processor_.set_satp_by_addr(pt_addr),{return err;});
return 0;
}
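init_VM's bring-up pattern is: reserve a fixed page-table arena once, then let a dedicated sub-allocator hand out page-sized tables from it. A minimal stand-in with assumed constants (vortex::MemoryAllocator is the real implementation):

#include <cstdint>

// Stand-in for the page-table sub-allocator: bump-allocates page-sized
// tables out of a fixed arena reserved up front, mirroring init_VM().
struct ArenaAllocator {
  uint64_t base, limit, next;
  ArenaAllocator(uint64_t b, uint64_t l) : base(b), limit(l), next(b) {}
  int allocate(uint64_t size, uint64_t* out) {
    if (next + size > base + limit) return -1;  // arena exhausted
    *out = next;
    next += size;
    return 0;
  }
};

int main() {
  const uint64_t PAGE_TABLE_BASE_ADDR = 0xF0000000;  // assumed
  const uint64_t PT_SIZE_LIMIT = 1ull << 25;         // Sv39 value from VX_config.h
  ArenaAllocator pt_mem(PAGE_TABLE_BASE_ADDR, PT_SIZE_LIMIT);
  uint64_t root = 0;
  return pt_mem.allocate(4096, &root);               // root page table -> 0
}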
// Return the base PPN value held in satp (the ptbr)
uint64_t get_base_ppn()
{
return processor_.get_base_ppn();
}
uint64_t get_pte_address(uint64_t base_ppn, uint64_t vpn)
{
return (base_ppn * PT_SIZE) + (vpn * PTE_SIZE);
}
uint8_t get_mode()
{
return processor_.get_satp_mode();
}
int16_t update_page_table(uint64_t ppn, uint64_t vpn, uint32_t flag)
{
DBGPRINT(" [RT:Update PT] Mapping vpn 0x%05lx to ppn 0x%05lx(flags = %u)\n", vpn, ppn, flag);
// sanity check
#if VM_ADDR_MODE == SV39
assert((((ppn >> 44) == 0) && ((vpn >> 27) == 0)) && "Upper bits are not zero!");
uint8_t level = 3;
#else // Default is SV32, BARE will not reach this point.
assert((((ppn >> 20) == 0) && ((vpn >> 20) == 0)) && "Upper 12 bits are not zero!");
uint8_t level = 2;
#endif
int i = level - 1;
vAddr_t vaddr(vpn << MEM_PAGE_LOG2_SIZE);
uint64_t pte_addr = 0, pte_bytes = 0;
uint64_t pt_addr = 0;
uint64_t cur_base_ppn = get_base_ppn();
while (i >= 0)
{
DBGPRINT(" [RT:Update PT]Start %u-level page table\n", i);
pte_addr = get_pte_address(cur_base_ppn, vaddr.vpn[i]);
pte_bytes = read_pte(pte_addr);
PTE_t pte_chk(pte_bytes);
DBGPRINT(" [RT:Update PT] PTE addr 0x%lx, PTE bytes 0x%lx\n", pte_addr, pte_bytes);
if (pte_chk.v == 1 && ((pte_bytes & 0xFFFFFFFF) != 0xbaadf00d))
{
DBGPRINT(" [RT:Update PT] PTE valid (ppn 0x%lx), continuing the walk...\n", pte_chk.ppn);
cur_base_ppn = pte_chk.ppn;
}
else
{
// If valid bit not set, allocate a next level page table
DBGPRINT(" [RT:Update PT] PTE Invalid (ppn 0x%lx) ...\n", pte_chk.ppn);
if (i == 0)
{
// Reach to leaf
DBGPRINT(" [RT:Update PT] Reached to level 0. This should be a leaf node(flag = %x) \n",flag);
uint32_t pte_flag = (flag << 1) | 0x3;
PTE_t new_pte(ppn <<MEM_PAGE_LOG2_SIZE, pte_flag);
write_pte(pte_addr, new_pte.pte_bytes);
break;
}
else
{
// in device memory and store ppn in PTE. Set rwx = 000 in PTE
// to indicate this is a pointer to the next level of the page table.
// Runtime flags are READ = 0x1, WRITE = 0x2, RW = 0x3; these match the PTE permission bits when shifted left by one.
alloc_page_table(&pt_addr);
uint32_t pte_flag = 0x1;
PTE_t new_pte(pt_addr, pte_flag);
write_pte(pte_addr, new_pte.pte_bytes);
cur_base_ppn = new_pte.ppn;
}
}
i--;
}
return 0;
}
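The leaf/pointer distinction above lives entirely in the low PTE bits: an intermediate entry is written with V=1 and RWX=000 (0x1), while a leaf takes the runtime flags shifted past the valid bit, i.e. (flag << 1) | 0x3. A worked encoding under the shared ppn << 10 | flags layout:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t ppn  = 0x12345;  // assumed physical page number
  uint32_t flag = 0x3;      // assumed runtime RW request
  uint64_t leaf    = (ppn << 10) | ((flag << 1) | 0x3);  // V,R,W set -> leaf
  uint64_t pointer = (ppn << 10) | 0x1;                  // V only -> next level
  assert((leaf & 0x1) && (leaf & 0xE));         // valid, and rwx != 000
  assert((pointer & 0x1) && !(pointer & 0xE));  // valid, and rwx == 000
  return 0;
}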
uint64_t page_table_walk(uint64_t vAddr_bits)
{
DBGPRINT(" [RT:PTW] start vAddr: 0x%lx\n", vAddr_bits);
if (!need_trans(vAddr_bits))
{
DBGPRINT(" [RT:PTW] Translation is not needed.\n");
return vAddr_bits;
}
uint8_t level = PT_LEVEL;
int i = level-1;
vAddr_t vaddr(vAddr_bits);
uint64_t pte_addr = 0, pte_bytes = 0;
uint64_t cur_base_ppn = get_base_ppn();
while (true)
{
DBGPRINT(" [RT:PTW]Start %u-level page table walk\n",i);
// Read PTE.
pte_addr = get_pte_address(cur_base_ppn, vaddr.vpn[i]);
pte_bytes = read_pte(pte_addr);
PTE_t pte(pte_bytes);
DBGPRINT(" [RT:PTW] PTE addr 0x%lx, PTE bytes 0x%lx\n", pte_addr, pte_bytes);
assert(((pte.pte_bytes & 0xFFFFFFFF) != 0xbaadf00d) && "ERROR: uninitialized PTE\n");
// Check if it has invalid flag bits.
if ((pte.v == 0) | ((pte.r == 0) & (pte.w == 1)))
{
std::string msg = " [RT:PTW] Page Fault : Attempted to access invalid entry.";
throw Page_Fault_Exception(msg);
}
if ((pte.r == 0) & (pte.w == 0) & (pte.x == 0))
{
i--;
// Not a leaf node as rwx == 000
if (i < 0)
{
throw Page_Fault_Exception(" [RT:PTW] Page Fault : No leaf node found.");
}
else
{
// Continue on to next level.
cur_base_ppn = pte.ppn;
DBGPRINT(" [RT:PTW] next base_ppn: 0x%lx\n", cur_base_ppn);
continue;
}
}
else
{
// Leaf node found.
// Check RWX permissions according to access type.
if (pte.r == 0)
{
throw Page_Fault_Exception(" [RT:PTW] Page Fault : TYPE LOAD, Incorrect permissions.");
}
cur_base_ppn = pte.ppn;
DBGPRINT(" [RT:PTW] Found PT_Base_Address(0x%lx) on Level %d.\n", pte.ppn, i);
break;
}
}
uint64_t paddr = (cur_base_ppn << MEM_PAGE_LOG2_SIZE) + vaddr.pgoff;
return paddr;
}
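For a concrete feel of the Sv39 walk this function performs, split a 39-bit virtual address into three 9-bit VPN indices plus the page offset; the walk consumes one index per level, highest first. Sketch with an assumed address:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t va = 0x180001234;           // assumed 39-bit virtual address
  uint64_t vpn2 = (va >> 30) & 0x1FF;  // bits 38..30 -> level-2 index
  uint64_t vpn1 = (va >> 21) & 0x1FF;  // bits 29..21 -> level-1 index
  uint64_t vpn0 = (va >> 12) & 0x1FF;  // bits 20..12 -> level-0 index
  uint64_t off  = va & 0xFFF;          // bits 11..0  -> page offset
  printf("vpn[2]=0x%llx vpn[1]=0x%llx vpn[0]=0x%llx pgoff=0x%llx\n",
         (unsigned long long)vpn2, (unsigned long long)vpn1,
         (unsigned long long)vpn0, (unsigned long long)off);
  return 0;
}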
// void read_page_table(uint64_t addr) {
@@ -652,7 +599,7 @@ public:
return ret;
}
-#endif // JAEWON
#endif // VM_ENABLE
private:
Arch arch_;
@@ -664,6 +611,7 @@ private:
std::unordered_map<uint32_t, std::array<uint64_t, 32>> mpm_cache_;
#ifdef VM_ENABLE
std::unordered_map<uint64_t, uint64_t> addr_mapping;
MemoryAllocator* page_table_mem_;
#endif
};

View file

@@ -21,6 +21,7 @@
#include <bitset>
using namespace vortex;
#ifdef VM_ENABLE
#ifndef NDEBUG
#define DBGPRINT(format, ...) do { printf("[VXDRV] " format "", ##__VA_ARGS__); } while (0)
@@ -29,16 +30,6 @@ using namespace vortex;
#endif
#endif
-uint64_t bits(uint64_t addr, uint8_t s_idx, uint8_t e_idx)
-{
-return (addr >> s_idx) & ((1 << (e_idx - s_idx + 1)) - 1);
-}
-bool bit(uint64_t addr, uint8_t idx)
-{
-return (addr) & (1 << idx);
-}
RamMemDevice::RamMemDevice(const char *filename, uint32_t wordSize)
: wordSize_(wordSize) {
@@ -124,6 +115,7 @@ void MemoryUnit::ADecoder::map(uint64_t start, uint64_t end, MemDevice &md) {
void MemoryUnit::ADecoder::read(void* data, uint64_t addr, uint64_t size) {
mem_accessor_t ma;
if (!this->lookup(addr, size, &ma)) {
assert(0);
std::cout << "lookup of 0x" << std::hex << addr << " failed.\n";
throw BadAddress();
}
@@ -133,6 +125,7 @@ void MemoryUnit::ADecoder::write(const void* data, uint64_t addr, uint64_t size) {
void MemoryUnit::ADecoder::write(const void* data, uint64_t addr, uint64_t size) {
mem_accessor_t ma;
if (!this->lookup(addr, size, &ma)) {
assert(0);
std::cout << "lookup of 0x" << std::hex << addr << " failed.\n";
throw BadAddress();
}
@@ -208,7 +201,7 @@ std::pair<bool, uint64_t> MemoryUnit::tlbLookup(uint64_t vAddr, ACCESS_TYPE type
}
//Check access permissions.
-if ( (type == ACCESS_TYPE::FENCE) & ((e.r == 0) | (e.x == 0)) )
if ( (type == ACCESS_TYPE::FETCH) & ((e.r == 0) | (e.x == 0)) )
{
throw Page_Fault_Exception("Page Fault : Incorrect permissions.");
}
@@ -601,12 +594,33 @@ void RAM::loadHexImage(const char* filename) {
#ifdef VM_ENABLE
uint64_t MemoryUnit::get_base_ppn()
{
return satp_->get_base_ppn();
}
uint64_t MemoryUnit::get_satp()
{
return satp_->get_satp();
}
uint8_t MemoryUnit::get_mode()
{
return satp_->get_mode();
}
void MemoryUnit::set_satp(uint64_t satp)
{
// uint16_t asid = 0; // set asid for different process
satp_ = new SATP_t(satp);
}
bool MemoryUnit::need_trans(uint64_t dev_pAddr)
{
// Check if this is BARE mode
-bool isBAREMode = (this->mode == VA_MODE::BARE);
bool isBAREMode = (get_mode() == BARE);
// Check if the address is reserved for system usage
-bool isReserved = (dev_pAddr >= PAGE_TABLE_BASE_ADDR);
// bool isReserved = (PAGE_TABLE_BASE_ADDR <= dev_pAddr && dev_pAddr < PAGE_TABLE_BASE_ADDR + PT_SIZE_LIMIT);
bool isReserved = (PAGE_TABLE_BASE_ADDR <= dev_pAddr);
// Check if the address is reserved for IO usage
bool isIO = (dev_pAddr < USER_BASE_ADDR);
// Check if the address falls within the startup address range
@@ -634,7 +648,6 @@ uint64_t MemoryUnit::vAddr_to_pAddr(uint64_t vAddr, ACCESS_TYPE type)
if (tlb_access.first)
{
// printf("Found pfn %lx in TLB\n",tlb_access.second);
pfn = tlb_access.second;
TLB_HIT++;
}
@@ -649,91 +662,86 @@ uint64_t MemoryUnit::vAddr_to_pAddr(uint64_t vAddr, ACCESS_TYPE type)
}
//Construct final address using pfn and offset.
DBGPRINT(" [MMU: V2P] translated vAddr: 0x%lx to pAddr 0x%lx",vAddr,((pfn << size_bits) + (vAddr & ((1 << size_bits) - 1))));
DBGPRINT(" [MMU: V2P] translated vAddr: 0x%lx to pAddr 0x%lx\n",vAddr,((pfn << size_bits) + (vAddr & ((1 << size_bits) - 1))));
return (pfn << size_bits) + (vAddr & ((1 << size_bits) - 1));
}
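A worked instance of the final composition in vAddr_to_pAddr, with assumed values and 4 KiB pages (size_bits = 12):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t vAddr = 0x80001A34;  // assumed virtual address
  uint64_t pfn   = 0x12345;     // assumed frame number from the TLB or the walk
  uint64_t size_bits = 12;      // 4 KiB pages
  uint64_t pAddr = (pfn << size_bits) + (vAddr & ((1ull << size_bits) - 1));
  printf("0x%llx\n", (unsigned long long)pAddr);  // prints 0x12345a34
  return 0;
}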
std::pair<uint64_t, uint8_t> MemoryUnit::page_table_walk(uint64_t vAddr_bits, ACCESS_TYPE type, uint64_t* size_bits)
{
DBGPRINT(" [MMU:PTW] Start: vaddr = 0x%lx, type = %u, size_bits %lu\n", vAddr_bits, type, *size_bits);
uint64_t LEVELS = 2;
vAddr_SV32_t vAddr(vAddr_bits);
uint64_t pte_bytes = 0;
uint64_t MemoryUnit::get_pte_address(uint64_t base_ppn, uint64_t vpn)
{
return (base_ppn * PT_SIZE) + (vpn * PTE_SIZE);
}
uint64_t pte_addr =0;
//Get base page table.
uint64_t pt_ba = this->ptbr << 12;
int i = LEVELS - 1;
std::pair<uint64_t, uint8_t> MemoryUnit::page_table_walk(uint64_t vAddr_bits, ACCESS_TYPE type, uint64_t *size_bits)
{
DBGPRINT(" [MMU:PTW] Start: vaddr = 0x%lx, type = %u.\n", vAddr_bits, type);
uint8_t level = PT_LEVEL;
int i = level-1;
vAddr_t vaddr(vAddr_bits);
uint32_t flags =0;
uint64_t pte_addr = 0, pte_bytes = 0;
uint64_t cur_base_ppn = get_base_ppn();
// Need to fix for super page
*size_bits = 12;
while(true)
while (true)
{
// Read PTE.
pte_addr = get_pte_address(cur_base_ppn, vaddr.vpn[i]);
decoder_.read(&pte_bytes, pte_addr, PTE_SIZE);
PTE_t pte(pte_bytes);
DBGPRINT(" [MMU:PTW] Level[%u] pte_addr=0x%lx, pte_bytes =0x%lx, pte.ppn= 0x%lx, pte.flags = %u)\n", i, pte_addr, pte_bytes, pte.ppn, pte.flags);
assert(((pte.pte_bytes & 0xFFFFFFFF) != 0xbaadf00d) && "ERROR: uninitialzed PTE\n" );
// Check if it has invalid flag bits.
if ((pte.v == 0) | ((pte.r == 0) & (pte.w == 1)))
{
assert(0);
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : Attempted to access invalid entry.");
}
//Read PTE.
pte_addr = pt_ba+vAddr.vpn[i] * PTE_SIZE;
decoder_.read(&pte_bytes, pte_addr, PTE_SIZE);
PTE_SV32_t pte(pte_bytes);
DBGPRINT(" [MMU:PTW] Level[%u] pte_bytes = 0x%lx, pte flags = %u)\n", i, pte.ppn , pte.flags);
//Check if it has invalid flag bits.
if ( (pte.v == 0) | ( (pte.r == 0) & (pte.w == 1) ) )
if ((pte.r == 0) & (pte.w == 0) & (pte.x == 0))
{
// Not a leaf node as rwx == 000
i--;
if (i < 0)
{
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : Attempted to access invalid entry.");
}
if ( (pte.r == 0) & (pte.w == 0) & (pte.x == 0))
{
//Not a leaf node as rwx == 000
i--;
if (i < 0)
{
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : No leaf node found.");
}
else
{
//Continue on to next level.
pt_ba = (pte_bytes >> 10 ) << 12;
}
assert(0);
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : No leaf node found.");
}
else
{
//Leaf node found, finished walking.
pt_ba = (pte_bytes >> 10 ) << 12;
break;
// Continue on to next level.
cur_base_ppn= pte.ppn;
DBGPRINT(" [MMU:PTW] next base_ppn: 0x%lx\n", cur_base_ppn);
continue;
}
}
PTE_SV32_t pte(pte_bytes);
//Check RWX permissions according to access type.
if ( (type == ACCESS_TYPE::FENCE) & ((pte.r == 0) | (pte.x == 0)) )
else
{
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : TYPE FENCE, Incorrect permissions.");
// Leaf node found, finished walking.
// Check RWX permissions according to access type.
if ((type == ACCESS_TYPE::FETCH) & ((pte.r == 0) | (pte.x == 0)))
{
assert(0);
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : TYPE FETCH, Incorrect permissions.");
}
else if ((type == ACCESS_TYPE::LOAD) & (pte.r == 0))
{
assert(0);
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : TYPE LOAD, Incorrect permissions.");
}
else if ((type == ACCESS_TYPE::STORE) & (pte.w == 0))
{
assert(0);
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : TYPE STORE, Incorrect permissions.");
}
cur_base_ppn = pte.ppn;
flags = pte.flags;
break;
}
else if ( (type == ACCESS_TYPE::LOAD) & (pte.r == 0) )
{
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : TYPE LOAD, Incorrect permissions.");
}
else if ( (type == ACCESS_TYPE::STORE) & (pte.w == 0) )
{
throw Page_Fault_Exception(" [MMU:PTW] Page Fault : TYPE STORE, Incorrect permissions.");
}
*size_bits = 12;
uint64_t pfn = pt_ba >> *size_bits;
return std::make_pair(pfn, pte_bytes & 0xff);
}
return std::make_pair(cur_base_ppn, flags);
}
uint64_t MemoryUnit::get_satp()
{
return satp;
}
void MemoryUnit::set_satp(uint64_t satp)
{
this->satp = satp;
this->ptbr = satp & ( (1<< SATP_PPN_WIDTH) - 1);
#ifdef XLEN_32
this->mode = satp & (1<< SATP_MODE_IDX) ? VA_MODE::SV32 : VA_MODE::BARE;
#else // 64 bit
this->mode = satp & (1<< SATP_MODE_IDX) ? VA_MODE::SV64 : VA_MODE::BARE;
#endif
}
#endif

View file

@@ -32,17 +32,85 @@ namespace vortex {
#ifdef VM_ENABLE
-enum VA_MODE {
-BARE,
-SV32,
-SV64
-};
// VA MODE
#define BARE 0x0
#define SV32 0x1
#define SV39 0x8
enum ACCESS_TYPE {
LOAD,
STORE,
-FENCE
FETCH
};
class SATP_t
{
private:
uint64_t address;
uint16_t asid;
uint8_t mode;
uint64_t ppn;
uint64_t satp;
uint64_t bits(uint64_t input, uint8_t s_idx, uint8_t e_idx)
{
return (input >> s_idx) & (((uint64_t)1 << (e_idx - s_idx + 1)) - 1);
}
bool bit(uint64_t input, uint8_t idx)
{
return (input) & ((uint64_t)1 << idx);
}
public:
SATP_t(uint64_t satp) : satp(satp)
{
#ifdef XLEN_32
mode = bit(satp, 31);
asid = bits(satp, 22, 30);
ppn = bits(satp, 0, 21);
#else
mode = bits(satp, 60, 63);
asid = bits(satp, 44, 59);
ppn = bits(satp, 0, 43);
#endif
address = ppn << MEM_PAGE_LOG2_SIZE;
}
SATP_t(uint64_t address, uint16_t asid) : address(address), asid(asid)
{
#ifdef XLEN_32
assert((address >> 32) == 0 && "Upper 32 bits are not zero!");
#endif
mode = VM_ADDR_MODE;
// asid = 0 ;
ppn = address >> MEM_PAGE_LOG2_SIZE;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshift-count-overflow"
#ifdef XLEN_32
satp = (((uint64_t)mode << 31) | ((uint64_t)asid << 22) | ppn);
#else
satp = (((uint64_t)mode << 60) | ((uint64_t)asid << 44) | ppn);
#endif
#pragma GCC diagnostic pop
}
uint8_t get_mode()
{
return mode;
}
uint16_t get_asid()
{
return asid;
}
uint64_t get_base_ppn()
{
return ppn;
}
uint64_t get_satp()
{
return satp;
}
};
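SATP_t packs satp exactly as the RISC-V privileged layout prescribes (Sv39: MODE in bits 63:60, ASID in 59:44, PPN in 43:0; Sv32: MODE in bit 31, ASID in 30:22, PPN in 21:0). A standalone round-trip of the Sv39 layout with assumed values:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t SV39 = 0x8;
  uint64_t ppn  = 0xF0000;  // assumed root-table PPN
  uint16_t asid = 0;
  uint64_t satp = (SV39 << 60) | ((uint64_t)asid << 44) | ppn;
  assert(((satp >> 60) & 0xF)         == SV39);  // mode field
  assert(((satp >> 44) & 0xFFFF)      == asid);  // asid field
  assert((satp & ((1ull << 44) - 1))  == ppn);   // base ppn field
  return 0;
}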
class Page_Fault_Exception : public std::runtime_error /* or logic_error */
{
@@ -119,6 +187,7 @@ public:
#ifdef VM_ENABLE
MemoryUnit(uint64_t pageSize = MEM_PAGE_SIZE);
~MemoryUnit() { delete this->satp_; }
#else
MemoryUnit(uint64_t pageSize = 0);
#endif
@@ -139,7 +208,9 @@ public:
#ifdef VM_ENABLE
void tlbAdd(uint64_t virt, uint64_t phys, uint32_t flags, uint64_t size_bits);
uint64_t get_satp();
uint8_t get_mode();
uint64_t get_base_ppn();
void set_satp(uint64_t satp);
#else
void tlbAdd(uint64_t virt, uint64_t phys, uint32_t flags);
@@ -228,6 +299,7 @@ private:
bool need_trans(uint64_t dev_pAddr);
uint64_t vAddr_to_pAddr(uint64_t vAddr, ACCESS_TYPE type);
uint64_t get_pte_address(uint64_t base_ppn, uint64_t vpn);
std::pair<uint64_t, uint8_t> page_table_walk(uint64_t vAddr_bits, ACCESS_TYPE type, uint64_t* size_bits);
#else
uint64_t toPhyAddr(uint64_t vAddr, uint32_t flagMask);
@@ -245,13 +317,9 @@ private:
amo_reservation_t amo_reservation_;
#ifdef VM_ENABLE
-uint64_t satp;
-VA_MODE mode;
-uint64_t ptbr;
std::unordered_set<uint64_t> unique_translations;
uint64_t TLB_HIT, TLB_MISS, TLB_EVICT, PTW, PERF_UNIQUE_PTW;
SATP_t *satp_;
#endif
};
@@ -322,68 +390,146 @@ private:
};
#ifdef VM_ENABLE
-class PTE_SV32_t
class PTE_t
{
private:
uint64_t address;
-uint64_t bits(uint64_t addr, uint8_t s_idx, uint8_t e_idx)
uint64_t bits(uint64_t input, uint8_t s_idx, uint8_t e_idx)
{
-return (addr >> s_idx) & ((1 << (e_idx - s_idx + 1)) - 1);
return (input >> s_idx) & (((uint64_t)1 << (e_idx - s_idx + 1)) - 1);
}
-bool bit(uint8_t idx)
bool bit(uint64_t input, uint8_t idx)
{
-return (address) & (1 << idx);
return (input) & ((uint64_t)1 << idx);
}
public:
#if VM_ADDR_MODE == SV39
bool N;
uint8_t PBMT;
#endif
uint64_t ppn;
uint32_t rsw;
uint32_t flags;
uint8_t level;
bool d, a, g, u, x, w, r, v;
-PTE_SV32_t(uint64_t address) : address(address)
-{
-assert((address>> 32) == 0 && "Upper 32 bits are not zero!");
-flags = bits(address,0,7);
-rsw = bits(address,8,9);
-ppn = bits(address,10,31);
-d = bit(7);
-a = bit(6);
-g = bit(5);
-u = bit(4);
-x = bit(3);
-w = bit(2);
-r = bit(1);
-v = bit(0);
-// printf("ppn = 0x%lx, flags= 0x%x, rsw= 0x%x\n",ppn,flags,rsw);
-}
uint64_t pte_bytes;
void set_flags(uint32_t flag)
{
this->flags = flag;
d = bit(flags, 7);
a = bit(flags, 6);
g = bit(flags, 5);
u = bit(flags, 4);
x = bit(flags, 3);
w = bit(flags, 2);
r = bit(flags, 1);
v = bit(flags, 0);
}
PTE_t(uint64_t address, uint32_t flags) : address(address)
{
#if VM_ADDR_MODE == SV39
N = 0;
PBMT = 0;
level = 3;
ppn = address >> MEM_PAGE_LOG2_SIZE;
// Reserve for Super page support
// ppn = new uint32_t [level];
// ppn[2]=bits(address,28,53);
// ppn[1]=bits(address,19,27);
// ppn[0]=bits(address,10,18);
set_flags(flags);
// pte_bytes = (N << 63) | (PBMT << 61) | (ppn <<10) | flags ;
pte_bytes = (ppn << 10) | flags;
#else // if VM_ADDR_MODE == SV32
assert((address>> 32) == 0 && "Upper 32 bits are not zero!");
level = 2;
ppn = address >> MEM_PAGE_LOG2_SIZE;
// Reserve for Super page support
// ppn = new uint32_t[level];
// ppn[1]=bits(address,20,31);
// ppn[0]=bits(address,10,19);
set_flags(flags);
pte_bytes = (ppn << 10) | flags;
#endif
}
PTE_t(uint64_t pte_bytes) : pte_bytes(pte_bytes)
{
#if VM_ADDR_MODE == SV39
N = bit(pte_bytes,63);
PBMT = bits(pte_bytes,61,62);
level = 3;
ppn=bits(pte_bytes,10,53);
address = ppn << MEM_PAGE_LOG2_SIZE;
// Reserve for Super page support
// ppn = new uint32_t [level];
// ppn[2]=bits(pte_bytes,28,53);
// ppn[1]=bits(pte_bytes,19,27);
// ppn[0]=bits(pte_bytes,10,18);
#else //#if VM_ADDR_MODE == SV32
assert((pte_bytes >> 32) == 0 && "Upper 32 bits are not zero!");
level = 2;
ppn=bits(pte_bytes,10, 31);
address = ppn << MEM_PAGE_LOG2_SIZE;
// Reserve for Super page support
// ppn = new uint32_t[level];
// ppn[1]=bits(address, 20,31);
// ppn[0]=bits(address, 10,19);
#endif
rsw = bits(pte_bytes,8,9);
set_flags((uint32_t)(bits(pte_bytes,0,7)));
}
~PTE_t()
{
// Reserve for Super page support
// delete ppn;
}
};
-class vAddr_SV32_t
class vAddr_t
{
private:
uint64_t address;
-uint64_t bits(uint64_t addr, uint8_t s_idx, uint8_t e_idx)
uint64_t bits(uint8_t s_idx, uint8_t e_idx)
{
-return (addr >> s_idx) & ((1 << (e_idx - s_idx + 1)) - 1);
return (address >> s_idx) & (((uint64_t)1 << (e_idx - s_idx + 1)) - 1);
}
-bool bit(uint64_t addr, uint8_t idx)
bool bit(uint8_t idx)
{
-return (addr) & (1 << idx);
return (address) & ((uint64_t)1 << idx);
}
public:
-uint64_t vpn[2];
uint64_t *vpn;
uint64_t pgoff;
uint8_t level;
-vAddr_SV32_t(uint64_t address) : address(address)
vAddr_t(uint64_t address) : address(address)
{
#if VM_ADDR_MODE == SV39
level = 3;
vpn = new uint64_t [level];
vpn[2] = bits(30,38);
vpn[1] = bits(21,29);
vpn[0] = bits(12,20);
pgoff = bits(0,11);
#else //#if VM_ADDR_MODE == SV32
assert((address>> 32) == 0 && "Upper 32 bits are not zero!");
-vpn[0] = bits(address,12,21);
-vpn[1] = bits(address,22,31);
-pgoff = bits(address,0,11);
-// printf("vpn[1] = 0x%lx, vpn[0] = 0x%lx, pgoff = 0x%lx\n",vpn[1],vpn[0],pgoff);
level = 2;
vpn = new uint64_t [level];
vpn[1] = bits(22,31);
vpn[0] = bits(12,21);
pgoff = bits(0,11);
#endif
}
~vAddr_t()
{
delete[] vpn; // array form to match new uint64_t[level]
}
};
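A note on the ownership pattern in vAddr_t: vpn is a heap array whose length depends on the paging mode, so the destructor must use delete[] to match new[]; a std::vector or fixed std::array would avoid the manual delete entirely. Minimal sketch of the pattern:

#include <cstdint>

// Sketch of the owning-raw-pointer pattern used by vAddr_t.
struct vaddr_sketch {
  uint64_t* vpn;
  uint8_t   level;
  explicit vaddr_sketch(uint8_t lvl) : vpn(new uint64_t[lvl]{}), level(lvl) {}
  ~vaddr_sketch() { delete[] vpn; }            // array delete matches new[]
  vaddr_sketch(const vaddr_sketch&) = delete;  // no copies of an owning pointer
  vaddr_sketch& operator=(const vaddr_sketch&) = delete;
};

int main() { vaddr_sketch v(3); return 0; }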
#endif

View file

@@ -116,7 +116,7 @@ void Emulator::clear() {
void Emulator::attach_ram(RAM* ram) {
// bind RAM to memory unit
#if (XLEN == 64)
-mmu_.attach(*ram, 0, 0xFFFFFFFFFFFFFFFF);
mmu_.attach(*ram, 0, 0x7FFFFFFFFF); // 39-bit SV39
#else
mmu_.attach(*ram, 0, 0xFFFFFFFF);
#endif
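The new upper bound is the Sv39 virtual-address limit; a quick check of where 0x7FFFFFFFFF comes from (2^39 - 1):

#include <cstdint>

int main() {
  constexpr uint64_t SV39_VA_BITS = 39;
  constexpr uint64_t mask = (1ull << SV39_VA_BITS) - 1;
  static_assert(mask == 0x7FFFFFFFFFull, "39-bit address space upper bound");
  return 0;
}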
@@ -271,11 +271,11 @@ bool Emulator::barrier(uint32_t bar_id, uint32_t count, uint32_t wid) {
#ifdef VM_ENABLE
void Emulator::icache_read(void *data, uint64_t addr, uint32_t size) {
-// DP(1, "*** icache_read 0x" << std::hex << addr << ", size = 0x " << size);
DP(3, "*** icache_read 0x" << std::hex << addr << ", size = 0x " << size);
try
{
-mmu_.read(data, addr, size, ACCESS_TYPE::LOAD);
mmu_.read(data, addr, size, ACCESS_TYPE::FETCH);
}
catch (Page_Fault_Exception& page_fault)
{
@@ -306,8 +306,7 @@ void Emulator::dcache_read(void *data, uint64_t addr, uint32_t size) {
} else {
try
{
-// mmu_.read(data, addr, size, 0);
mmu_.read(data, addr, size, ACCESS_TYPE::LOAD);
}
catch (Page_Fault_Exception& page_fault)
{

View file

@@ -84,7 +84,7 @@ int main(int argc, char **argv) {
Arch arch(num_threads, num_warps, num_cores);
// create memory module
-RAM ram(0, RAM_PAGE_SIZE);
RAM ram(0, MEM_PAGE_SIZE);
// create processor
Processor processor(arch);

View file

@@ -149,6 +149,9 @@ Processor::Processor(const Arch& arch)
Processor::~Processor() {
delete impl_;
#ifdef VM_ENABLE
delete satp_;
#endif
}
void Processor::attach_ram(RAM* mem) {
@@ -164,13 +167,19 @@ void Processor::dcr_write(uint32_t addr, uint32_t value) {
}
#ifdef VM_ENABLE
uint64_t Processor::get_satp() {
// std::cout << "get SATP: 0x" << std::hex << this->satp << std::endl;
return this->satp;
}
-void Processor::set_satp(uint64_t satp) {
int16_t Processor::set_satp_by_addr(uint64_t base_addr) {
uint16_t asid = 0;
satp_ = new SATP_t(base_addr, asid);
if (satp_ == NULL)
return 1;
uint64_t satp = satp_->get_satp();
impl_->set_satp(satp);
-this->satp = satp;
return 0;
}
uint8_t Processor::get_satp_mode() {
return satp_->get_mode();
}
uint64_t Processor::get_base_ppn() {
return satp_->get_base_ppn();
}
#endif

View file

@@ -22,6 +22,9 @@ namespace vortex {
class Arch;
class RAM;
class ProcessorImpl;
#ifdef VM_ENABLE
class SATP_t;
#endif
class Processor {
public:
@@ -34,14 +37,15 @@ public:
void dcr_write(uint32_t addr, uint32_t value);
#ifdef VM_ENABLE
uint64_t get_satp();
-void set_satp(uint64_t satp);
uint8_t get_satp_mode();
uint64_t get_base_ppn();
int16_t set_satp_by_addr(uint64_t addr);
#endif
private:
ProcessorImpl* impl_;
#ifdef VM_ENABLE
-uint64_t satp;
SATP_t *satp_;
#endif
};

View file

@@ -62,7 +62,7 @@ void kernel_body(kernel_arg_t* __UNIFORM__ arg) {
value *= 5;
break;
default:
-assert(task_id < arg->num_points);
//assert(task_id < arg->num_points);
break;
}