Ayaz Akram has uploaded this change for review. ( https://gem5-review.googlesource.com/c/public/gem5/+/43945 )
Change subject: arch-riscv: Add riscv pmp support
......................................................................
arch-riscv: Add riscv pmp support
This change adds the PMP (physical memory protection)
feature of the RISC-V privileged ISA.
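
The number of modeled PMP entries is exposed as a SimObject
parameter. As an illustrative sketch (the exact attribute path
depends on how the CPU and its RiscvMMU are instantiated in the
configuration script), it can be overridden with:

    # hypothetical config path; cpu.mmu assumed to be a RiscvMMU
    cpu.mmu.pmp.max_pmp = 16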
Change-Id: Ica701223cfc1be91a0bf953e6a3df6d72d6d3130
---
A src/arch/riscv/PMP.py
M src/arch/riscv/RiscvMMU.py
M src/arch/riscv/RiscvTLB.py
M src/arch/riscv/SConscript
M src/arch/riscv/isa.cc
M src/arch/riscv/mmu.hh
M src/arch/riscv/pagetable_walker.cc
M src/arch/riscv/pagetable_walker.hh
A src/arch/riscv/pmp.cc
A src/arch/riscv/pmp.hh
M src/arch/riscv/tlb.cc
M src/arch/riscv/tlb.hh
12 files changed, 570 insertions(+), 2 deletions(-)
diff --git a/src/arch/riscv/PMP.py b/src/arch/riscv/PMP.py
new file mode 100644
index 0000000..05084ee
--- /dev/null
+++ b/src/arch/riscv/PMP.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2021 The Regents of the University of California
+# All Rights Reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from m5.SimObject import SimObject
+from m5.params import *
+from m5.proxy import *
+
+class PMP(SimObject):
+ type = 'PMP'
+ cxx_header = 'arch/riscv/pmp.hh'
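+ # The priv. spec (v1.10/v1.11) defines up to 16 PMP entries
+ # (later versions allow up to 64), hence the default of 16.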
+ max_pmp = Param.Int(16, "Max PMP Entries")
+
diff --git a/src/arch/riscv/RiscvMMU.py b/src/arch/riscv/RiscvMMU.py
index 38f1da9..5f1aa11 100644
--- a/src/arch/riscv/RiscvMMU.py
+++ b/src/arch/riscv/RiscvMMU.py
@@ -40,6 +40,7 @@
from m5.objects.BaseMMU import BaseMMU
from m5.objects.RiscvTLB import RiscvTLB
from m5.objects.PMAChecker import PMAChecker
+from m5.objects.PMP import PMP
class RiscvMMU(BaseMMU):
type = 'RiscvMMU'
@@ -48,6 +49,7 @@
itb = RiscvTLB()
dtb = RiscvTLB()
pma_checker = Param.PMAChecker(PMAChecker(), "PMA Checker")
+ pmp = PMP()
@classmethod
def walkerPorts(cls):
diff --git a/src/arch/riscv/RiscvTLB.py b/src/arch/riscv/RiscvTLB.py
index 05ff521..0a4e257 100644
--- a/src/arch/riscv/RiscvTLB.py
+++ b/src/arch/riscv/RiscvTLB.py
@@ -44,6 +44,7 @@
"Number of outstanding walks that can be squashed per cycle")
# Grab the pma_checker from the MMU
pma_checker = Param.PMAChecker(Parent.any, "PMA Checker")
+ pmp = Param.PMP(Parent.any, "PMP")
class RiscvTLB(BaseTLB):
type = 'RiscvTLB'
@@ -54,3 +55,4 @@
RiscvPagetableWalker(), "page table walker")
# Grab the pma_checker from the MMU
pma_checker = Param.PMAChecker(Parent.any, "PMA Checker")
+ pmp = Param.PMP(Parent.any, "PMP")
diff --git a/src/arch/riscv/SConscript b/src/arch/riscv/SConscript
index ac47df5..03e382c 100644
--- a/src/arch/riscv/SConscript
+++ b/src/arch/riscv/SConscript
@@ -52,6 +52,7 @@
Source('pagetable.cc')
Source('pagetable_walker.cc')
Source('pma_checker.cc')
+ Source('pmp.cc')
Source('reg_abi.cc')
Source('remote_gdb.cc')
Source('tlb.cc')
@@ -63,6 +64,7 @@
Source('bare_metal/fs_workload.cc')
SimObject('PMAChecker.py')
+ SimObject('PMP.py')
SimObject('RiscvFsWorkload.py')
SimObject('RiscvInterrupts.py')
SimObject('RiscvISA.py')
@@ -72,6 +74,7 @@
DebugFlag('RiscvMisc')
DebugFlag('TLBVerbose')
+ DebugFlag('PMP')
DebugFlag('PageTableWalker', \
"Page table walker state machine debugging")
diff --git a/src/arch/riscv/isa.cc b/src/arch/riscv/isa.cc
index c6f0ca0..177957b 100644
--- a/src/arch/riscv/isa.cc
+++ b/src/arch/riscv/isa.cc
@@ -35,7 +35,9 @@
#include <sstream>
#include "arch/riscv/interrupts.hh"
+#include "arch/riscv/mmu.hh"
#include "arch/riscv/pagetable.hh"
+#include "arch/riscv/pmp.hh"
#include "arch/riscv/registers.hh"
#include "base/bitfield.hh"
#include "base/compiler.hh"
@@ -356,6 +358,58 @@
warn("Ignoring write to %s.\n", CSRData.at(misc_reg).name);
} else {
switch (misc_reg) {
+
+ // From section 3.7.1 of the RISC-V priv. spec
+ // V1.12: the odd-numbered configuration
+ // registers are illegal for RV64, and
+ // each 64 bit CFG register holds the
+ // configurations of 8 PMP entries.
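+ //
+ // For example, byte i of a pmpcfg register configures
+ // pmp entry i of that register's group of 8:
+ // bit 0 = R, bit 1 = W, bit 2 = X,
+ // bits 3-4 = A (OFF/TOR/NA4/NAPOT), bit 7 = L.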
+
+ case MISCREG_PMPCFG0:
+ case MISCREG_PMPCFG2:
+ {
+ // PMP registers should only be modified in M mode
+ assert(readMiscRegNoEffect(MISCREG_PRV) == PRV_M);
+
+ // Specs do not seem to mention what should be
+ // configured first, cfg or address regs!
+ // qemu seems to update the tables when
+ // pmp addr regs are written (with the assumption
+ // that cfg regs are already written)
+
+ for (int i=0; i < sizeof(val); i++) {
+
+ uint8_t cfg_val = (val >> (8*i)) & 0xff;
+
+ auto mmu = dynamic_cast<RiscvISA::MMU *>
+ (tc->getMMUPtr());
+
+ // Form pmp_index using the index i and
+ // PMPCFG register number
+ // Note: MISCREG_PMPCFG2 - MISCREG_PMPCFG0 = 1
+ // 8*(misc_reg-MISCREG_PMPCFG0) will be useful
+ // if a system contains more than 16 PMP entries
+ uint32_t pmp_index = i+(8*(misc_reg-MISCREG_PMPCFG0));
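+ // e.g. a write to byte 3 of pmpcfg2 updates pmp entry 11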
+ mmu->getPMP()->pmpUpdateCfg(pmp_index,cfg_val);
+ }
+
+ setMiscRegNoEffect(misc_reg, val);
+ }
+ break;
+ case MISCREG_PMPADDR00 ... MISCREG_PMPADDR15:
+ {
+ // PMP registers should only be modified in M mode
+ assert(readMiscRegNoEffect(MISCREG_PRV) == PRV_M);
+
+ auto mmu = dynamic_cast<RiscvISA::MMU *>
+ (tc->getMMUPtr());
+ uint32_t pmp_index = misc_reg-MISCREG_PMPADDR00;
+ mmu->getPMP()->pmpUpdateAddr(pmp_index, val);
+
+ setMiscRegNoEffect(misc_reg, val);
+ }
+ break;
+
case MISCREG_IP:
{
auto ic = dynamic_cast<RiscvISA::Interrupts *>(
diff --git a/src/arch/riscv/mmu.hh b/src/arch/riscv/mmu.hh
index ce3ce30..3e58829 100644
--- a/src/arch/riscv/mmu.hh
+++ b/src/arch/riscv/mmu.hh
@@ -74,6 +74,13 @@
MMU *ommu = dynamic_cast<MMU*>(old_mmu);
BaseMMU::takeOverFrom(ommu);
pma->takeOverFrom(ommu->pma);
+
+ }
+
+ PMP *
+ getPMP()
+ {
+ return static_cast<TLB*>(dtb)->pmp;
}
};
diff --git a/src/arch/riscv/pagetable_walker.cc b/src/arch/riscv/pagetable_walker.cc
index 263a047..d3c3905 100644
--- a/src/arch/riscv/pagetable_walker.cc
+++ b/src/arch/riscv/pagetable_walker.cc
@@ -490,8 +490,14 @@
Addr paddr = walker->tlb->translateWithTLB(vaddr, satp.asid,
mode);
req->setPaddr(paddr);
walker->pma->check(req);
+
+ // do the pmp check if any checking condition is met.
+ // timingFault will be NoFault if the pmp checks pass,
+ // otherwise an address fault is returned.
+ timingFault = walker->pmp->pmpCheck(req, mode, pmode, tc);
+
// Let the CPU continue.
- translation->finish(NoFault, req, tc, mode);
+ translation->finish(timingFault, req, tc, mode);
} else {
// There was a fault during the walk. Let the CPU know.
translation->finish(timingFault, req, tc, mode);
diff --git a/src/arch/riscv/pagetable_walker.hh b/src/arch/riscv/pagetable_walker.hh
index 1b848a0..7ef18a8 100644
--- a/src/arch/riscv/pagetable_walker.hh
+++ b/src/arch/riscv/pagetable_walker.hh
@@ -43,6 +43,7 @@
#include "arch/riscv/pagetable.hh"
#include "arch/riscv/pma_checker.hh"
+#include "arch/riscv/pmp.hh"
#include "arch/riscv/tlb.hh"
#include "base/types.hh"
#include "mem/packet.hh"
@@ -169,6 +170,7 @@
TLB * tlb;
System * sys;
PMAChecker * pma;
+ PMP * pmp;
RequestorID requestorId;
// The number of outstanding walks that can be squashed per cycle.
@@ -200,6 +202,7 @@
ClockedObject(params), port(name() + ".port", this),
funcState(this, NULL, NULL, true), tlb(NULL),
sys(params.system),
pma(params.pma_checker),
+ pmp(params.pmp),
requestorId(sys->getRequestorId(this)),
numSquashable(params.num_squash_per_cycle),
startWalkWrapperEvent([this]{ startWalkWrapper(); }, name())
diff --git a/src/arch/riscv/pmp.cc b/src/arch/riscv/pmp.cc
new file mode 100644
index 0000000..3eea436
--- /dev/null
+++ b/src/arch/riscv/pmp.cc
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2021 The Regents of the University of California
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "arch/riscv/pmp.hh"
+
+#include "arch/generic/tlb.hh"
+#include "arch/riscv/faults.hh"
+#include "arch/riscv/isa.hh"
+#include "base/addr_range.hh"
+#include "base/types.hh"
+#include "cpu/thread_context.hh"
+#include "debug/PMP.hh"
+#include "math.h"
+#include "mem/request.hh"
+#include "params/PMP.hh"
+#include "sim/sim_object.hh"
+
+PMP::PMP(const Params &params) :
+ SimObject(params),
+ maxEntries(params.max_pmp),
+ numRules(0)
+{
+ for (int i=0; i < maxEntries; i++) {
+ PmpEntry entry;
+ pmpTable.emplace_back(entry);
+ }
+}
+
+Fault
+PMP::pmpCheck(const RequestPtr &req, BaseTLB::Mode mode,
+ RiscvISA::PrivilegeMode pmode, ThreadContext *tc)
+{
+ // First determine if pmp table should be consulted
+ if (!shouldCheckPMP(pmode, mode, tc))
+ return NoFault;
+
+ DPRINTF(PMP, "Checking pmp permissions for va: %#x , pa: %#x\n",
+ req->getVaddr(), req->getPaddr());
+
+ // An access should be successful if there are
+ // no rules defined yet or we are in M mode (based
+ // on specs v1.10)
+ if (numRules == 0 || (pmode == RiscvISA::PrivilegeMode::PRV_M))
+ return NoFault;
+
+ // match_index will be used to identify the pmp entry
+ // which matched for the given address
+ int match_index = -1;
+
+ // all pmp entries need to be checked, from the
+ // lowest-numbered to the highest-numbered entry
+ for (int i = 0; i < pmpTable.size(); i++) {
+ AddrRange pmp_range = pmpTable[i].pmpAddr;
+ if (pmp_range.contains(req->getPaddr()) &&
+ pmp_range.contains(req->getPaddr() + req->getSize() - 1)) {
+ // according to the spec, an address only matches when
+ // every byte of the access, i.e. (addr) through
+ // (addr + request_size - 1), lies within the pmp range
+ match_index = i;
+ }
+
+ if ((match_index > -1) &&
+ (PMP_OFF != pmpGetAField(pmpTable[match_index].pmpCfg))) {
+ // check the RWX permissions from the pmp entry
+ uint8_t allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+
+ // match_index is the index of the matching pmp entry
+ allowed_privs &= pmpTable[match_index].pmpCfg;
+
+ if ((mode == BaseTLB::Mode::Read) &&
+ (PMP_READ & allowed_privs)) {
+ return NoFault;
+ }
+ else if ((mode == BaseTLB::Mode::Write) &&
+ (PMP_WRITE & allowed_privs)) {
+ return NoFault;
+ }
+ else if ((mode == BaseTLB::Mode::Execute) &&
+ (PMP_EXEC & allowed_privs)) {
+ return NoFault;
+ }
+ else {
+ return createAddrfault(req->getVaddr(), mode);
+ }
+ }
+ }
+ // if no entry matched and we are not in M mode return fault
+ return createAddrfault(req->getVaddr(), mode);
+}
+
+Fault
+PMP::createAddrfault(Addr vaddr, BaseTLB::Mode mode)
+{
+ RiscvISA::ExceptionCode code;
+ if (mode == BaseTLB::Read) {
+ code = RiscvISA::ExceptionCode::LOAD_ACCESS;
+ } else if (mode == BaseTLB::Write) {
+ code = RiscvISA::ExceptionCode::STORE_ACCESS;
+ } else {
+ code = RiscvISA::ExceptionCode::INST_ACCESS;
+ }
+ return std::make_shared<RiscvISA::AddressFault>(vaddr, code);
+}
+
+inline uint8_t
+PMP::pmpGetAField(uint8_t cfg)
+{
+ // to get a field from pmpcfg register
+ uint8_t a = cfg >> 3;
+ return a & 0x03;
+}
+
+
+void
+PMP::pmpUpdateCfg(uint32_t pmp_index, uint8_t this_cfg)
+{
+ DPRINTF(PMP, "Update pmp config with %u for pmp entry: %u \n",
+ (unsigned)this_cfg, pmp_index);
+
+ warn_if((PMP_LOCK & this_cfg), "pmp lock feature is not supported.");
+
+ pmpTable[pmp_index].pmpCfg = this_cfg;
+ pmpUpdateRule(pmp_index);
+
+}
+
+void
+PMP::pmpUpdateRule(uint32_t pmp_index)
+{
+ // In qemu, the rule is updated whenever
+ // pmpaddr/pmpcfg is written
+
+ numRules = 0;
+ Addr prevAddr = 0;
+
+ if (pmp_index >= 1) {
+ prevAddr = pmpTable[pmp_index - 1].rawAddr;
+ }
+
+ Addr this_addr = pmpTable[pmp_index].rawAddr;
+ uint8_t this_cfg = pmpTable[pmp_index].pmpCfg;
+ AddrRange this_range;
+
+ switch (pmpGetAField(this_cfg)) {
+ // checking the address matching mode of pmp entry
+ case PMP_OFF:
+ // null region (pmp disabled)
+ this_range = AddrRange(0, 0);
+ break;
+ case PMP_TOR:
+ // top of range mode
+ this_range = AddrRange(prevAddr << 2, (this_addr << 2) - 1);
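+ // e.g. with pmpaddr(i-1) = 0x1000 and pmpaddr(i) = 0x2000,
+ // this covers physical addresses 0x4000 through 0x7fff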
+ break;
+ case PMP_NA4:
+ // naturally aligned four byte region
+ this_range = AddrRange(this_addr << 2, (this_addr + 4) - 1);
+ break;
+ case PMP_NAPOT:
+ // naturally aligned power of two region, >= 8 bytes
+ this_range = AddrRange(pmpDecodeNapot(this_addr));
+ break;
+ default:
+ this_range = AddrRange(0,0);
+ }
+
+ pmpTable[pmp_index].pmpAddr = this_range;
+
+ for (int i = 0; i < maxEntries; i++) {
+ const uint8_t a_field =
+ pmpGetAField(pmpTable[i].pmpCfg);
+ if (PMP_OFF != a_field) {
+ numRules++;
+ }
+ }
+}
+
+void
+PMP::pmpUpdateAddr(uint32_t pmp_index, Addr this_addr)
+{
+ DPRINTF(PMP, "Update pmp addr %#x for pmp entry %u \n",
+ this_addr, pmp_index);
+
+ // write the raw addr in the pmp table; pmpUpdateRule
+ // converts it into a range using the cfg value
+ // currently stored for this entry
+ pmpTable[pmp_index].rawAddr = this_addr;
+ pmpUpdateRule(pmp_index);
+}
+
+bool
+PMP::shouldCheckPMP(RiscvISA::PrivilegeMode pmode,
+ BaseTLB::Mode mode, ThreadContext *tc)
+{
+ // instruction fetch in S and U mode
+ bool cond1 = (mode == BaseTLB::Execute &&
+ (pmode != RiscvISA::PrivilegeMode::PRV_M));
+
+ // data access in S and U mode when MPRV in mstatus is clear
+ RiscvISA::STATUS status =
+ tc->readMiscRegNoEffect(RiscvISA::MISCREG_STATUS);
+ bool cond2 = (mode != BaseTLB::Execute &&
+ (pmode != RiscvISA::PrivilegeMode::PRV_M)
+ && (!status.mprv));
+
+ // data access in any mode when MPRV bit in mstatus is set
+ // and the MPP field in mstatus is S or U
+ bool cond3 = (mode != BaseTLB::Execute && (status.mprv)
+ && (status.mpp != RiscvISA::PrivilegeMode::PRV_M));
+
+ return (cond1 || cond2 || cond3);
+}
+
+AddrRange
+PMP::pmpDecodeNapot(Addr pmpaddr)
+{
+ if (pmpaddr == -1) {
+ AddrRange this_range(0, -1);
+ return this_range;
+ } else {
+ uint64_t t1 = ctz64(~pmpaddr);
+ uint64_t range = (1ULL << (t1 + 3)) - 1;
+
+ // pmpaddr reg encodes bits 55-2 of a
+ // 56 bit physical address for RV64
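+ // e.g. a pmpaddr ending in binary 0111 (three trailing
+ // ones) gives t1 = 3, i.e. a naturally aligned
+ // 2^(3+3) = 64 byte region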
+ uint64_t base = mbits(pmpaddr, 63, t1) << 2;
+ AddrRange this_range(base, base+range);
+ return this_range;
+ }
+}
diff --git a/src/arch/riscv/pmp.hh b/src/arch/riscv/pmp.hh
new file mode 100644
index 0000000..b5027c4
--- /dev/null
+++ b/src/arch/riscv/pmp.hh
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2021 The Regents of the University of California
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ARCH_RISCV_PMP_HH__
+#define __ARCH_RISCV_PMP_HH__
+
+#include "arch/generic/tlb.hh"
+#include "arch/riscv/isa.hh"
+#include "base/addr_range.hh"
+#include "base/types.hh"
+#include "mem/packet.hh"
+#include "params/PMP.hh"
+#include "sim/sim_object.hh"
+
+/**
+ * @file
+ * PMP header file.
+ */
+
+/**
+ * This class helps to implement RISCV's physical memory
+ * protection (pmp) primitive.
+ * @todo Add statistics and debug prints.
+ */
+class PMP : public SimObject
+{
+ public:
+ typedef PMPParams Params;
+
+ const Params &
+ params() const
+ {
+ return dynamic_cast<const Params &>(_params);
+ }
+
+ PMP(const Params &params);
+
+ private:
+ /** maximum number of entries in the pmp table */
+ int maxEntries;
+
+ /** This enum is used for encoding of address matching mode of
+ * pmp address register, which is present in bits 3-4 (A) of
+ * pmpcfg register for a pmp entry.
+ * PMP_OFF = null region (pmp disabled)
+ * PMP_TOR = top of range mode
+ * PMP_NA4 = naturally aligned four byte region
+ * PMP_NAPOT = naturally aligned power of two region, >= 8 bytes
+ */
+ typedef enum {
+ PMP_OFF,
+ PMP_TOR,
+ PMP_NA4,
+ PMP_NAPOT
+ } pmpAmatch;
+
+ /** pmpcfg address range read permission mask */
+ const uint8_t PMP_READ = 1 << 0;
+
+ /** pmpcfg address range write permission mask */
+ const uint8_t PMP_WRITE = 1 << 1;
+
+ /** pmpcfg address range execute permission mask */
+ const uint8_t PMP_EXEC = 1 << 2;
+
+ /** pmpcfg address range locked mask */
+ const uint8_t PMP_LOCK = 1 << 7;
+
+ /** variable to keep track of the number of active rules at any time */
+ int numRules;
+
+ /** single pmp entry struct*/
+ typedef struct {
+ /** addr range corresponding to a single pmp entry */
+ AddrRange pmpAddr = AddrRange(0, 0);
+ /** raw addr in pmpaddr register for a pmp entry */
+ Addr rawAddr = 0;
+ /** pmpcfg reg value for a pmp entry */
+ uint8_t pmpCfg = 0;
+ } PmpEntry;
+
+ /** a table of pmp entries */
+ std::vector<PmpEntry> pmpTable;
+
+ public:
+ /**
+ * pmpCheck checks if a particular memory access
+ * is allowed based on the pmp rules.
+ * @param req memory request.
+ * @param mode mode of request (read, write, execute).
+ * @param pmode current privilege mode of execution (U, S, M).
+ * @param tc thread context.
+ * @return Fault.
+ */
+ Fault pmpCheck(const RequestPtr &req, BaseTLB::Mode mode,
+ RiscvISA::PrivilegeMode pmode, ThreadContext *tc);
+
+ /**
+ * pmpUpdateCfg updates the pmpcfg for a pmp
+ * entry and calls pmpUpdateRule to update the
+ * rule of corresponding pmp entry.
+ * @param pmp_index pmp entry index.
+ * @param this_cfg value to be written to pmpcfg.
+ */
+ void pmpUpdateCfg(uint32_t pmp_index, uint8_t this_cfg);
+
+ /**
+ * pmpUpdateAddr updates the pmpaddr for a pmp
+ * entry and calls pmpUpdateRule to update the
+ * rule of corresponding pmp entry.
+ * @param pmp_index pmp entry index.
+ * @param this_addr value to be written to pmpaddr.
+ */
+ void pmpUpdateAddr(uint32_t pmp_index, Addr this_addr);
+
+ private:
+ /**
+ * This function is called during a memory
+ * access to determine if the pmp table
+ * should be consulted for this access.
+ * @param pmode current privilege mode of execution (U, S, M).
+ * @param mode mode of request (read, write, execute).
+ * @param tc thread context.
+ * @return true or false.
+ */
+ bool shouldCheckPMP(RiscvISA::PrivilegeMode pmode,
+ BaseTLB::Mode mode, ThreadContext *tc);
+
+ /**
+ * createAddrfault creates an address fault
+ * if the pmp checks fail to pass for a given
+ * access. This function is used by pmpCheck().
+ * @param vaddr virtual address of the access.
+ * @param mode mode of access (read, write, execute).
+ * @return Fault.
+ */
+ Fault createAddrfault(Addr vaddr, BaseTLB::Mode mode);
+
+ /**
+ * pmpUpdateRule updates the pmp rule for a
+ * given pmp entry depending on the value
+ * of pmpaddr and pmpcfg for that entry.
+ * @param pmp_index pmp entry index.
+ */
+ void pmpUpdateRule(uint32_t pmp_index);
+
+ /**
+ * pmpGetAField extracts the A field (address matching mode)
+ * from an input pmpcfg register
+ * @param cfg pmpcfg register value.
+ * @return The A field.
+ */
+ inline uint8_t pmpGetAField(uint8_t cfg);
+
+ /**
+ * This function decodes a pmpaddr register value
+ * into an address range when A field of pmpcfg
+ * register is set to NAPOT mode (naturally aligned
+ * power of two region).
+ * @param pmpaddr input address from a pmp entry.
+ * @return an address range.
+ */
+ inline AddrRange pmpDecodeNapot(Addr pmpaddr);
+
+};
+
+#endif // __ARCH_RISCV_PMP_HH__
diff --git a/src/arch/riscv/tlb.cc b/src/arch/riscv/tlb.cc
index a2df355..8859ae5 100644
--- a/src/arch/riscv/tlb.cc
+++ b/src/arch/riscv/tlb.cc
@@ -39,6 +39,7 @@
#include "arch/riscv/pagetable.hh"
#include "arch/riscv/pagetable_walker.hh"
#include "arch/riscv/pma_checker.hh"
+#include "arch/riscv/pmp.hh"
#include "arch/riscv/pra_constants.hh"
#include "arch/riscv/utility.hh"
#include "base/inifile.hh"
@@ -68,7 +69,8 @@
TLB::TLB(const Params &p) :
BaseTLB(p), size(p.size), tlb(size),
- lruSeq(0), stats(this), pma(p.pma_checker)
+ lruSeq(0), stats(this), pma(p.pma_checker),
+ pmp(p.pmp)
{
for (size_t x = 0; x < size; x++) {
tlb[x].trieHandle = NULL;
@@ -365,6 +367,11 @@
if (!delayed && fault == NoFault) {
pma->check(req);
+
+ // do the pmp check if any checking condition is met.
+ // fault will be NoFault if the pmp checks pass,
+ // otherwise an address fault is returned.
+ fault = pmp->pmpCheck(req, mode, pmode, tc);
}
return fault;
diff --git a/src/arch/riscv/tlb.hh b/src/arch/riscv/tlb.hh
index 1421396..b739cdb 100644
--- a/src/arch/riscv/tlb.hh
+++ b/src/arch/riscv/tlb.hh
@@ -84,6 +84,7 @@
public:
PMAChecker *pma;
+ PMP *pmp;
public:
typedef RiscvTLBParams Params;
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/43945
To unsubscribe, or for help writing mail filters, visit
https://gem5-review.googlesource.com/settings
Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: Ica701223cfc1be91a0bf953e6a3df6d72d6d3130
Gerrit-Change-Number: 43945
Gerrit-PatchSet: 1
Gerrit-Owner: Ayaz Akram <yazak...@ucdavis.edu>
Gerrit-MessageType: newchange
_______________________________________________
gem5-dev mailing list -- gem5-dev@gem5.org
To unsubscribe send an email to gem5-dev-le...@gem5.org