vm changes

This commit is contained in:
Keerthan Tito 2022-12-12 06:44:26 -05:00
parent 88ed687557
commit afa72061bc
7 changed files with 632 additions and 62 deletions

View file

@ -5,6 +5,7 @@
#include <iostream>
#include <future>
#include <chrono>
#include <bitset>
#include <vortex.h>
#include <vx_utils.h>
@ -19,7 +20,14 @@
#include <mem.h>
#include <constants.h>
//Extract bits [s_idx, e_idx] (inclusive) of addr.
static uint64_t bits(uint64_t addr, uint8_t s_idx, uint8_t e_idx)
{
return (addr >> s_idx) & ((1ull << (e_idx - s_idx + 1)) - 1);
}
//Return bit idx of addr.
static bool bit(uint64_t addr, uint8_t idx)
{
return addr & (1ull << idx);
}
using namespace vortex;
///////////////////////////////////////////////////////////////////////////////
@ -73,8 +81,9 @@ public:
RAM_PAGE_SIZE,
CACHE_BLOCK_SIZE)
{
// attach memory module
processor_.attach_ram(&ram_);
//Configure the SATP register for the selected virtual addressing mode.
set_processor_satp(VM_ADDR_MODE);
}
~vx_device() {
@ -82,9 +91,38 @@ public:
future_.wait();
}
}
int map_local_mem(uint64_t size, uint64_t dev_maddr)
{
if (get_mode() == VA_MODE::BARE)
return 0;
uint32_t ppn = dev_maddr >> 12;
uint32_t vpn = ppn;
//dev_maddr may point to a region larger than one page, but page tables must be
//updated at page granularity, so split the allocation into pages.
for (ppn = (dev_maddr) >> 12; ppn < ((dev_maddr) >> 12) + (size/RAM_PAGE_SIZE) + 1; ppn++)
{
//A 1:1 (identity) mapping is currently used; other mapping schemes can be
//plugged in here.
vpn = ppn;
//If a mapping for this VPN does not already exist:
if (addr_mapping.find(vpn) == addr_mapping.end())
{
//Create mapping.
update_page_table(ppn, vpn);
addr_mapping[vpn] = ppn;
}
}
return 0;
}
int alloc_local_mem(uint64_t size, uint64_t* dev_maddr) {
return mem_allocator_.allocate(size, dev_maddr);
int err = mem_allocator_.allocate(size, dev_maddr);
map_local_mem(size, *dev_maddr);
return err;
}
int free_local_mem(uint64_t dev_maddr) {
@ -96,13 +134,13 @@ public:
if (dest_addr + asize > LOCAL_MEM_SIZE)
return -1;
ram_.write((const uint8_t*)src + src_offset, dest_addr, asize);
/*printf("VXDRV: upload %d bytes to 0x%x\n", size, dest_addr);
for (int i = 0; i < size; i += 4) {
printf("mem-write: 0x%x <- 0x%x\n", dest_addr + i, *(uint32_t*)((uint8_t*)src + src_offset + i));
}*/
if (dest_addr >= STARTUP_ADDR)
map_local_mem(asize,dest_addr);
else if (dest_addr >= 0x7fff0000)
{
map_local_mem(asize,dest_addr);
}
ram_.write((const uint8_t*)src + src_offset, dest_addr, asize);
return 0;
}
@ -148,14 +186,152 @@ public:
break;
}
return 0;
}
}
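//SV32 satp layout: bit 31 = MODE (1 selects SV32 paging), bits 21:0 = PPN of the
//root page table; satp = 0 means BARE (no translation).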
void set_processor_satp(VA_MODE mode)
{
uint32_t satp;
if (mode == VA_MODE::BARE)
satp = 0;
else if (mode == VA_MODE::SV32)
{
satp = (alloc_page_table() >> 12) | 0x80000000;
}
processor_.set_satp(satp);
}
uint32_t get_ptbr()
{
return processor_.get_satp() & 0x003fffff;
}
VA_MODE get_mode()
{
return processor_.get_satp() & 0x80000000 ? VA_MODE::SV32 : VA_MODE::BARE;
}
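//Install a two-level SV32 mapping from vAddr to pAddr. Both arguments are page
//numbers (VPN and PPN), not byte addresses; the second-level table is allocated
//on demand.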
void update_page_table(uint32_t pAddr, uint32_t vAddr) {
//Create the page-table mapping from vAddr to pAddr.
uint32_t ppn_0, ppn_1, pte_addr, pte_bytes;
uint32_t vpn_1 = bits(vAddr, 10, 19);
uint32_t vpn_0 = bits(vAddr, 0, 9);
//Read first level PTE.
pte_addr = (get_ptbr() << 12) + (vpn_1 * PTE_SIZE);
pte_bytes = read_pte(pte_addr);
if ( bit(pte_bytes, 0) )
{
//If the valid bit is set, proceed to the next level using the PPN from this PTE.
ppn_1 = (pte_bytes >> 10);
}
else
{
//If valid bit not set, allocate a second level page table
// in device memory and store ppn in PTE. Set rwx = 000 in PTE
//to indicate this is a pointer to the next level of the page table.
ppn_1 = (alloc_page_table() >> 12);
pte_bytes = ( (ppn_1 << 10) | 0b0000000001) ;
write_pte(pte_addr, pte_bytes);
}
//Read second level PTE.
pte_addr = (ppn_1 << 12) + (vpn_0 * PTE_SIZE);
pte_bytes = read_pte(pte_addr);
if ( bit(pte_bytes, 0) )
{
//If valid bit is set, then the page is already allocated.
//Should not reach this point, a sanity check.
}
else
{
//If valid bit not set, write ppn of pAddr in PTE. Set rwx = 111 in PTE
//to indicate this is a leaf PTE and has the stated permissions.
pte_bytes = ( (pAddr << 10) | 0b0000001111) ;
write_pte(pte_addr, pte_bytes);
//If super paging is enabled.
if (SUPER_PAGING)
{
//Check whether this second-level page table can be promoted to a superpage: brute-force
//iterate over all PTEs of the table and verify that every one has its valid bit set.
bool superpage = true;
for(int i = 0; i < 1024; i++)
{
pte_addr = (ppn_1 << 12) + (i * PTE_SIZE);
pte_bytes = read_pte(pte_addr);
if (!bit(pte_bytes, 0))
{
superpage = false;
break;
}
}
if (superpage)
{
//Promote to a superpage: copy the first PTE of the second level into the root PTE.
//That entry already has the correct PPN[1], PPN[0] equal to zero, and the correct
//access bits, which is exactly the format of a superpage leaf.
pte_addr = (ppn_1 << 12);
pte_bytes = read_pte(pte_addr);
pte_addr = (get_ptbr() << 12) + (vpn_1 * PTE_SIZE);
write_pte(pte_addr, pte_bytes);
}
}
}
}
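//Allocate one page of device memory to hold a page table and zero it, so every
//PTE starts out invalid (V = 0).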
uint32_t alloc_page_table() {
uint64_t addr;
mem_allocator_.allocate(RAM_PAGE_SIZE, &addr);
init_page_table(addr);
return addr;
}
void init_page_table(uint64_t addr) {
uint64_t asize = aligned_size(RAM_PAGE_SIZE, CACHE_BLOCK_SIZE);
//Zero-fill the new table so every PTE starts out invalid (V = 0).
uint8_t *src = new uint8_t[asize];
for (uint64_t i = 0; i < asize; ++i) {
src[i] = 0;
}
ram_.write((const uint8_t*)src, addr, asize);
delete[] src;
}
void read_page_table(uint32_t addr) {
uint8_t *dest = new uint8_t[RAM_PAGE_SIZE];
download(dest, addr, RAM_PAGE_SIZE, 0);
printf("VXDRV: download %d bytes from 0x%x\n", RAM_PAGE_SIZE, addr);
for (int i = 0; i < RAM_PAGE_SIZE; i += 4) {
printf("mem-read: 0x%x -> 0x%x\n", addr + i, *(uint32_t*)(dest + i));
}
delete[] dest;
}
void write_pte(uint32_t addr, uint32_t value = 0xbaadf00d) {
uint8_t *src = new uint8_t[PTE_SIZE];
for (uint32_t i = 0; i < PTE_SIZE; ++i) {
src[i] = (value >> ((i & 0x3) * 8)) & 0xff;
}
ram_.write((const uint8_t*)src, addr, PTE_SIZE);
delete[] src;
}
uint32_t read_pte(uint32_t addr) {
uint32_t value = 0;
ram_.read((uint8_t*)&value, addr, PTE_SIZE);
return value;
}
private:
ArchDef arch_;
RAM ram_;
Processor processor_;
MemoryAllocator mem_allocator_;
std::future<void> future_;
std::unordered_map<uint32_t, uint32_t> addr_mapping;
};
///////////////////////////////////////////////////////////////////////////////
@ -354,4 +530,5 @@ extern int vx_ready_wait(vx_device_h hdevice, uint64_t timeout) {
vx_device *device = ((vx_device*)hdevice);
return device->wait(timeout);
}
}

View file

@ -1,6 +1,22 @@
`ifndef VX_CONFIG
`define VX_CONFIG
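// Virtual memory configuration: address translation mode, PTE size in bytes,
// TLB entry count, and superpage promotion.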
`ifndef VM_ADDR_MODE
`define VM_ADDR_MODE SV32
`endif
`ifndef PTE_SIZE
`define PTE_SIZE 4
`endif
`ifndef TLB_SIZE
`define TLB_SIZE 32
`endif
`ifndef SUPER_PAGING
`define SUPER_PAGING true
`endif
`ifndef XLEN
`define XLEN 32
`endif

View file

@ -4,9 +4,20 @@
#include <fstream>
#include <assert.h>
#include "util.h"
#include <VX_config.h>
#include <bitset>
using namespace vortex;
//Extract bits [s_idx, e_idx] (inclusive) of addr.
static uint64_t bits(uint64_t addr, uint8_t s_idx, uint8_t e_idx)
{
return (addr >> s_idx) & ((1ull << (e_idx - s_idx + 1)) - 1);
}
//Return bit idx of addr.
static bool bit(uint64_t addr, uint8_t idx)
{
return addr & (1ull << idx);
}
RamMemDevice::RamMemDevice(const char *filename, uint32_t wordSize)
: wordSize_(wordSize) {
std::ifstream input(filename);
@ -107,58 +118,125 @@ void MemoryUnit::ADecoder::write(const void *data, uint64_t addr, uint64_t size)
///////////////////////////////////////////////////////////////////////////////
MemoryUnit::MemoryUnit(uint64_t pageSize, uint64_t addrBytes, bool disableVm)
MemoryUnit::MemoryUnit(uint64_t pageSize, uint64_t addrBytes)
: pageSize_(pageSize)
, addrBytes_(addrBytes)
, disableVM_(disableVm) {
if (!disableVm) {
tlb_[0] = TLBEntry(0, 077);
}
}
, TLB_HIT(0)
, TLB_MISS(0)
, TLB_EVICT(0)
, PTW(0)
, PERF_UNIQUE_PTW(0) {}
void MemoryUnit::attach(MemDevice &m, uint64_t start, uint64_t end) {
decoder_.map(start, end, m);
}
MemoryUnit::TLBEntry MemoryUnit::tlbLookup(uint64_t vAddr, uint32_t flagMask) {
auto iter = tlb_.find(vAddr / pageSize_);
std::pair<bool, uint64_t> MemoryUnit::tlbLookup(uint64_t vAddr, ACCESS_TYPE type, uint32_t* size_bits) {
//Find a matching entry, accounting for per-entry page size (4 KB pages vs. 4 MB superpages).
for (auto entry : tlb_)
{
if (entry.first == (vAddr >> entry.second.size_bits))
{
*size_bits = entry.second.size_bits;
vAddr = vAddr >> (*size_bits);
break;
}
}
auto iter = tlb_.find(vAddr);
if (iter != tlb_.end()) {
if (iter->second.flags & flagMask)
return iter->second;
else {
throw PageFault(vAddr, false);
TLBEntry e = iter->second;
//Set the MRU bit on a hit.
iter->second.mru_bit = true;
//If the TLB is at full capacity, clear every MRU bit except the one we just
//hit, so that an eviction candidate always exists.
if (tlb_.size() == TLB_SIZE)
{
for (auto& entry : tlb_)
{
entry.second.mru_bit = false;
}
iter->second.mru_bit = true;
}
//Check access permissions.
if ( (type == ACCESS_TYPE::FETCH) & ((e.r == 0) | (e.x == 0)) )
{
throw Page_Fault_Exception("Page Fault : Incorrect permissions.");
}
else if ( (type == ACCESS_TYPE::LOAD) & (e.r == 0) )
{
throw Page_Fault_Exception("Page Fault : Incorrect permissions.");
}
else if ( (type == ACCESS_TYPE::STORE) & (e.w == 0) )
{
throw Page_Fault_Exception("Page Fault : Incorrect permissions.");
}
else
{
//TLB Hit
return std::make_pair(true, iter->second.pfn);
}
} else {
throw PageFault(vAddr, true);
//TLB Miss
return std::make_pair(false, 0);
}
}
void MemoryUnit::read(void *data, uint64_t addr, uint64_t size, bool sup) {
void MemoryUnit::read(void *data, uint64_t addr, uint64_t size, ACCESS_TYPE type ) {
uint64_t pAddr;
if (disableVM_) {
if (this->mode == VA_MODE::BARE) {
pAddr = addr;
} else {
uint32_t flagMask = sup ? 8 : 1;
TLBEntry t = this->tlbLookup(addr, flagMask);
pAddr = t.pfn * pageSize_ + addr % pageSize_;
pAddr = vAddr_to_pAddr(addr, type);
}
return decoder_.read(data, pAddr, size);
}
void MemoryUnit::write(const void *data, uint64_t addr, uint64_t size, bool sup) {
void MemoryUnit::write(const void *data, uint64_t addr, uint64_t size, ACCESS_TYPE type) {
uint64_t pAddr;
if (disableVM_) {
if ( (this->mode == VA_MODE::BARE) | (addr >= IO_BASE_ADDR) ) {
pAddr = addr;
} else {
uint32_t flagMask = sup ? 16 : 2;
TLBEntry t = tlbLookup(addr, flagMask);
pAddr = t.pfn * pageSize_ + addr % pageSize_;
pAddr = vAddr_to_pAddr(addr, type);
}
decoder_.write(data, pAddr, size);
}
void MemoryUnit::tlbAdd(uint64_t virt, uint64_t phys, uint32_t flags) {
tlb_[virt / pageSize_] = TLBEntry(phys / pageSize_, flags);
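//Insert a translation into the TLB. Replacement is MRU-bit based: when one slot
//short of capacity, clear all MRU bits; when full, evict an entry whose MRU bit
//is clear.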
void MemoryUnit::tlbAdd(uint64_t vpn, uint64_t pfn, uint32_t flags, uint32_t size_bits) {
if (tlb_.size() == TLB_SIZE - 1)
{
for (auto& entry : tlb_)
{
entry.second.mru_bit = false;
}
}
else if (tlb_.size() == TLB_SIZE)
{
uint64_t del = tlb_.begin()->first; //fallback victim in case every MRU bit is set
for (auto entry : tlb_)
{
if (!entry.second.mru_bit)
{
del = entry.first;
break;
}
}
tlb_.erase(tlb_.find(del));
TLB_EVICT++;
}
tlb_[vpn] = TLBEntry(pfn, flags, size_bits);
}
void MemoryUnit::tlbRm(uint64_t va) {
@ -313,4 +391,131 @@ void RAM::loadHexImage(const char* filename) {
++line;
--size;
}
}
}
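//Translate a virtual address to a physical address: consult the TLB first; on a
//miss, walk the page table, insert the translation into the TLB, and update the
//performance counters.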
uint64_t MemoryUnit::vAddr_to_pAddr(uint64_t vAddr, ACCESS_TYPE type)
{
uint64_t pfn;
uint32_t size_bits;
//First lookup TLB.
std::pair<bool, uint64_t> tlb_access = tlbLookup(vAddr, type, &size_bits);
if (tlb_access.first)
{
pfn = tlb_access.second;
TLB_HIT++;
}
else //Else walk the PT.
{
std::pair<uint64_t, uint8_t> ptw_access = page_table_walk(vAddr, type, &size_bits);
tlbAdd(vAddr>>size_bits, ptw_access.first, ptw_access.second,size_bits);
pfn = ptw_access.first;
TLB_MISS++;
PTW++;
unique_translations.insert(vAddr >> size_bits);
PERF_UNIQUE_PTW = unique_translations.size();
}
//Construct final address using pfn and offset.
return (pfn << size_bits) + (vAddr & ((1 << size_bits) - 1));
}
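//Two-level SV32 page table walk, following the RISC-V privileged spec: start at the
//root table (ptbr), index with VPN[1], then VPN[0]; a PTE with rwx == 000 points to
//the next level, anything else is a leaf. A leaf found at the first level is a 4 MB superpage.
//Example: vAddr 0x80001234 -> VPN[1] = 0x200, VPN[0] = 0x001, page offset = 0x234.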
std::pair<uint64_t, uint8_t> MemoryUnit::page_table_walk(uint64_t vAddr_bits, ACCESS_TYPE type, uint32_t* size_bits)
{
uint32_t LEVELS = 2;
vAddr_SV32_t vAddr(vAddr_bits);
uint32_t pte_bytes;
//Get base page table.
uint64_t a = this->ptbr << 12;
int i = LEVELS - 1;
while(true)
{
//Read PTE.
decoder_.read(&pte_bytes, a+vAddr.vpn[i]*PTE_SIZE, sizeof(uint32_t));
PTE_SV32_t pte(pte_bytes);
//Check if it has invalid flag bits.
if ( (pte.v == 0) | ( (pte.r == 0) & (pte.w == 1) ) )
{
throw Page_Fault_Exception("Page Fault : Attempted to access invalid entry.");
}
if ( (pte.r == 0) & (pte.w == 0) & (pte.x == 0))
{
//Not a leaf node as rwx == 000
i--;
if (i < 0)
{
throw Page_Fault_Exception("Page Fault : No leaf node found.");
}
else
{
//Continue on to next level.
a = (pte_bytes >> 10 ) << 12;
}
}
else
{
//Leaf node found, finished walking.
a = (pte_bytes >> 10 ) << 12;
break;
}
}
PTE_SV32_t pte(pte_bytes);
//Check RWX permissions according to access type.
if ( (type == ACCESS_TYPE::FETCH) & ((pte.r == 0) | (pte.x == 0)) )
{
throw Page_Fault_Exception("Page Fault : TYPE FETCH, Incorrect permissions.");
}
else if ( (type == ACCESS_TYPE::LOAD) & (pte.r == 0) )
{
throw Page_Fault_Exception("Page Fault : TYPE LOAD, Incorrect permissions.");
}
else if ( (type == ACCESS_TYPE::STORE) & (pte.w == 0) )
{
throw Page_Fault_Exception("Page Fault : TYPE STORE, Incorrect permissions.");
}
uint64_t pfn;
if (i > 0)
{
//It is a super page.
if (pte.ppn[0] != 0)
{
//Misaligned superpage: PPN[0] must be zero for a first-level leaf.
throw Page_Fault_Exception("Page Fault : Misaligned Super Page.");
}
else
{
//Valid super page.
pfn = pte.ppn[1];
*size_bits = 22;
}
}
else
{
//Regular page.
*size_bits = 12;
pfn = a >> 12;
}
return std::make_pair(pfn, pte_bytes & 0xff);
}
uint32_t MemoryUnit::get_satp()
{
return this->satp;
}
void MemoryUnit::set_satp(uint32_t satp)
{
this->satp = satp;
this->ptbr = satp & 0x003fffff;
this->mode = satp & 0x80000000 ? VA_MODE::SV32 : VA_MODE::BARE;
}

View file

@ -3,9 +3,33 @@
#include <cstdint>
#include <vector>
#include <unordered_map>
#include <unordered_set>
#include <cstdint>
#include <stdexcept>
namespace vortex {
enum VA_MODE
{
BARE,
SV32
};
enum ACCESS_TYPE
{
LOAD,
STORE,
FETCH
};
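//Thrown when translation fails (invalid PTE or insufficient permissions); callers
//in the core log the message and rethrow.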
class Page_Fault_Exception : public std::runtime_error /* or logic_error */
{
public:
Page_Fault_Exception(const std::string& what = "") : std::runtime_error(what) {}
uint64_t addr;
ACCESS_TYPE type;
};
struct BadAddress {};
class MemDevice {
@ -55,30 +79,34 @@ public:
///////////////////////////////////////////////////////////////////////////////
class MemoryUnit {
public:
struct PageFault {
PageFault(uint64_t a, bool nf)
: faultAddr(a)
, notFound(nf)
{}
uint64_t faultAddr;
bool notFound;
};
// struct PageFault {
// PageFault(uint64_t a, bool nf)
// : faultAddr(a)
// , notFound(nf)
// {}
// uint64_t faultAddr;
// bool notFound;
// };
MemoryUnit(uint64_t pageSize, uint64_t addrBytes, bool disableVm = false);
MemoryUnit(uint64_t pageSize, uint64_t addrBytes);
void attach(MemDevice &m, uint64_t start, uint64_t end);
void read(void *data, uint64_t addr, uint64_t size, bool sup);
void write(const void *data, uint64_t addr, uint64_t size, bool sup);
void read(void *data, uint64_t addr, uint64_t size, ACCESS_TYPE type);
void write(const void *data, uint64_t addr, uint64_t size, ACCESS_TYPE type);
void tlbAdd(uint64_t virt, uint64_t phys, uint32_t flags);
void tlbAdd(uint64_t virt, uint64_t phys, uint32_t flags, uint32_t size_bits);
void tlbRm(uint64_t va);
void tlbFlush() {
tlb_.clear();
}
uint32_t get_satp();
void set_satp(uint32_t satp);
private:
class ADecoder {
@ -110,25 +138,57 @@ private:
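//A TLB entry caches one translation. size_bits records the page size it maps
//(12 for 4 KB pages, 22 for 4 MB superpages) and mru_bit drives the MRU-based
//replacement policy.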
struct TLBEntry {
TLBEntry() {}
TLBEntry(uint32_t pfn, uint32_t flags)
TLBEntry(uint32_t pfn, uint8_t flags, uint32_t size_bits)
: pfn(pfn)
, flags(flags)
{}
, flags(flags)
, mru_bit(true)
, size_bits (size_bits)
{
d = bit(7);
a = bit(6);
g = bit(5);
u = bit(4);
x = bit(3);
w = bit(2);
r = bit(1);
v = bit(0);
}
bool bit(uint8_t idx)
{
return (flags) & (1 << idx);
}
uint32_t pfn;
uint32_t flags;
uint8_t flags;
bool d, a, g, u, x, w, r, v;
bool mru_bit;
uint32_t size_bits;
};
TLBEntry tlbLookup(uint64_t vAddr, uint32_t flagMask);
std::pair<bool, uint64_t> tlbLookup(uint64_t vAddr, ACCESS_TYPE type, uint32_t* size_bits);
uint64_t vAddr_to_pAddr(uint64_t vAddr, ACCESS_TYPE type);
std::pair<uint64_t, uint8_t> page_table_walk(uint64_t vAddr_bits, ACCESS_TYPE type, uint32_t* size_bits);
std::unordered_map<uint64_t, TLBEntry> tlb_;
uint64_t pageSize_;
uint64_t addrBytes_;
ADecoder decoder_;
bool disableVM_;
uint32_t satp;
VA_MODE mode;
uint32_t ptbr;
std::unordered_set<uint64_t> unique_translations;
uint64_t TLB_HIT, TLB_MISS, TLB_EVICT, PTW, PERF_UNIQUE_PTW;
};
///////////////////////////////////////////////////////////////////////////////
class RAM : public MemDevice {
public:
@ -164,4 +224,66 @@ private:
mutable uint64_t last_page_index_;
};
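//SV32 page table entry layout: PPN[1] in bits 31:20, PPN[0] in bits 19:10, RSW in
//bits 9:8, and the D, A, G, U, X, W, R, V flag bits in bits 7:0.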
class PTE_SV32_t
{
private:
uint64_t address;
uint64_t bits(uint64_t addr, uint8_t s_idx, uint8_t e_idx)
{
return (addr >> s_idx) & ((1 << (e_idx - s_idx + 1)) - 1);
}
bool bit(uint8_t idx)
{
return (address) & (1 << idx);
}
public:
uint64_t ppn[2];
uint32_t rsw;
uint32_t flags;
bool d, a, g, u, x, w, r, v;
PTE_SV32_t(uint64_t address) : address(address)
{
flags = bits(address,0,7);
rsw = bits(address,8,9);
ppn[0] = bits(address,10,19);
ppn[1] = bits(address,20,31);
d = bit(7);
a = bit(6);
g = bit(5);
u = bit(4);
x = bit(3);
w = bit(2);
r = bit(1);
v = bit(0);
}
};
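//SV32 virtual address layout: VPN[1] in bits 31:22, VPN[0] in bits 21:12, and the
//12-bit page offset in bits 11:0.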
class vAddr_SV32_t
{
private:
uint64_t address;
uint64_t bits(uint64_t addr, uint8_t s_idx, uint8_t e_idx)
{
return (addr >> s_idx) & ((1 << (e_idx - s_idx + 1)) - 1);
}
bool bit(uint64_t addr, uint8_t idx)
{
return (addr) & (1 << idx);
}
public:
uint64_t vpn[2];
uint64_t pgoff;
vAddr_SV32_t(uint64_t address) : address(address)
{
vpn[0] = bits(address,12,21);
vpn[1] = bits(address,22,31);
pgoff = bits(address,0,11);
}
};
} // namespace vortex

View file

@ -20,7 +20,7 @@ Core::Core(const SimContext& ctx, const ArchDef &arch, uint32_t id)
, id_(id)
, arch_(arch)
, decoder_(arch)
, mmu_(0, arch.wsize(), true)
, mmu_(0, arch.wsize())
, smem_(RAM_PAGE_SIZE)
, tex_units_(NUM_TEX_UNITS, this)
, warps_(arch.num_warps())
@ -401,7 +401,15 @@ WarpMask Core::barrier(uint32_t bar_id, uint32_t count, uint32_t warp_id) {
}
void Core::icache_read(void *data, uint64_t addr, uint32_t size) {
mmu_.read(data, addr, size, 0);
try
{
mmu_.read(data, addr, size, ACCESS_TYPE::FETCH);
}
catch (Page_Fault_Exception& page_fault)
{
std::cout<<page_fault.what()<<std::endl;
throw;
}
}
void Core::dcache_read(void *data, uint64_t addr, uint32_t size) {
@ -409,8 +417,16 @@ void Core::dcache_read(void *data, uint64_t addr, uint32_t size) {
if (type == AddrType::Shared) {
addr &= (SMEM_SIZE-1);
smem_.read(data, addr, size);
} else {
mmu_.read(data, addr, size, 0);
} else {
try
{
mmu_.read(data, addr, size, ACCESS_TYPE::LOAD);
}
catch (Page_Fault_Exception& page_fault)
{
std::cout<<page_fault.what()<<std::endl;
throw;
}
}
}
@ -424,7 +440,15 @@ void Core::dcache_write(const void* data, uint64_t addr, uint32_t size) {
addr &= (SMEM_SIZE-1);
smem_.write(data, addr, size);
} else {
mmu_.write(data, addr, size, 0);
try
{
mmu_.write(data, addr, size, ACCESS_TYPE::STORE);
}
catch (Page_Fault_Exception& page_fault)
{
std::cout<<page_fault.what()<<std::endl;
throw;
}
}
}
}
@ -449,6 +473,7 @@ void Core::writeToStdOut(const void* data, uint64_t addr, uint32_t size) {
uint32_t Core::get_csr(uint32_t addr, uint32_t tid, uint32_t wid) {
switch (addr) {
case CSR_SATP:
return csrs_.at(addr);
case CSR_PMPCFG0:
case CSR_PMPADDR0:
case CSR_MSTATUS:
@ -665,6 +690,8 @@ void Core::set_csr(uint32_t addr, uint32_t value, uint32_t /*tid*/, uint32_t wid
#endif
{
csrs_.at(addr) = value;
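//Keep the MMU in sync: a write to SATP must immediately update the translation
//mode and root page-table pointer.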
if (addr == CSR_SATP)
this->mmu_.set_satp(value);
}
}

View file

@ -157,6 +157,13 @@ public:
return exitcode;
}
//Added
void set_core_satp(uint32_t satp) {
for (auto core : cores_) {
core->set_csr(CSR_SATP,satp,0,0);
}
}
};
///////////////////////////////////////////////////////////////////////////////
@ -175,4 +182,15 @@ void Processor::attach_ram(RAM* mem) {
int Processor::run() {
return impl_->run();
}
}
//Added
uint32_t Processor::get_satp() {
return this->satp;
}
//Added
void Processor::set_satp(uint32_t satp) {
this->satp = satp;
impl_->set_core_satp(satp);
}

View file

@ -1,4 +1,5 @@
#pragma once
#include <stdint.h>
namespace vortex {
@ -14,9 +15,13 @@ public:
int run();
uint32_t get_satp();//added
void set_satp(uint32_t satp);//added
private:
class Impl;
Impl* impl_;
uint32_t satp;//added
};
}