Roger Chang has uploaded this change for review. ( https://gem5-review.googlesource.com/c/public/gem5/+/68057?usp=email )

Change subject: arch-riscv: Support PMP lock feature
......................................................................

arch-riscv: Support PMP lock feature

The lock feature makes M mode subject to the memory permission check
before reading, writing, or executing data. Once the lock bit of
pmpicfg is set, pmpicfg and pmpaddri ignore any later writes until the
CPU is reset, and pmpaddr(i-1) also ignores writes if the A field of
pmpicfg is set to TOR.
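
For illustration, a minimal sketch of these write rules (not part of
this patch; Entry, tryWriteCfg and tryWriteAddr are hypothetical names):

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  constexpr uint8_t PMP_LOCK  = 1 << 7;  // L bit of pmpicfg
  constexpr uint8_t PMP_A_TOR = 1 << 3;  // A field encoded as TOR (0b01)

  struct Entry { uint8_t cfg = 0; uint64_t addr = 0; };

  // A write to pmpicfg is ignored once the entry is locked.
  bool tryWriteCfg(std::vector<Entry> &t, std::size_t i, uint8_t val)
  {
      if (t[i].cfg & PMP_LOCK)
          return false;                  // locked until CPU reset
      t[i].cfg = val;
      return true;
  }

  // A write to pmpaddri is ignored if entry i is locked, or if entry
  // i+1 is locked and its A field is TOR (entry i supplies the base).
  bool tryWriteAddr(std::vector<Entry> &t, std::size_t i, uint64_t val)
  {
      if (t[i].cfg & PMP_LOCK)
          return false;
      if (i + 1 < t.size() && (t[i + 1].cfg & PMP_LOCK) &&
          (t[i + 1].cfg & 0x18) == PMP_A_TOR)
          return false;
      t[i].addr = val;
      return true;
  }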

The following is added in this CL:
1. Add a condition to run the PMP check whenever any lock bit in the
   pmp table is set
2. Add a PMP_LOCK bit check when trying to update pmpaddr and pmpcfg
3. If no PMP entry matches and the privilege mode is M, no fault is
   generated
4. If the address matches a PMP entry, return no fault if the privilege
   mode is M and the lock bit is not set (see the sketch after this
   list)
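
An illustrative sketch of the M-mode check described in items 3 and 4
(not part of this patch; allowedInMMode and its arguments are
hypothetical, with match_cfg holding the pmpicfg byte of the matching
entry, if any):

  #include <cstdint>
  #include <optional>

  enum class Access { Read, Write, Exec };

  bool allowedInMMode(std::optional<uint8_t> match_cfg, Access a)
  {
      constexpr uint8_t R = 1 << 0, W = 1 << 1, X = 1 << 2, L = 1 << 7;
      if (!match_cfg)
          return true;               // item 3: no matching entry in M mode
      if (!(*match_cfg & L))
          return true;               // item 4: matching entry is not locked
      switch (a) {                   // locked entries bind M mode as well
        case Access::Read:  return *match_cfg & R;
        case Access::Write: return *match_cfg & W;
        case Access::Exec:  return *match_cfg & X;
      }
      return false;
  }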

For more details about PMP, please see the RISC-V Spec Volume II,
Privileged Architecture, Version 1.12, Section 3.7 Physical Memory
Protection.

Change-Id: I3e7c5824d6c05f2ea928ee9ec7714f7271e4c58c
---
M src/arch/riscv/PMP.py
M src/arch/riscv/faults.cc
M src/arch/riscv/isa.cc
M src/arch/riscv/pmp.cc
M src/arch/riscv/pmp.hh
5 files changed, 131 insertions(+), 29 deletions(-)



diff --git a/src/arch/riscv/PMP.py b/src/arch/riscv/PMP.py
index a3844c9..c395688 100644
--- a/src/arch/riscv/PMP.py
+++ b/src/arch/riscv/PMP.py
@@ -32,6 +32,6 @@
 class PMP(SimObject):
     type = "PMP"
     cxx_header = "arch/riscv/pmp.hh"
-    cxx_class = "gem5::PMP"
+    cxx_class = "gem5::RiscvISA::PMP"

     pmp_entries = Param.Int(16, "Maximum PMP Entries Supported")
diff --git a/src/arch/riscv/faults.cc b/src/arch/riscv/faults.cc
index 3469c71..8c6c0c0 100644
--- a/src/arch/riscv/faults.cc
+++ b/src/arch/riscv/faults.cc
@@ -33,6 +33,8 @@

 #include "arch/riscv/insts/static_inst.hh"
 #include "arch/riscv/isa.hh"
+#include "arch/riscv/mmu.hh"
+#include "arch/riscv/pmp.hh"
 #include "arch/riscv/regs/misc.hh"
 #include "arch/riscv/utility.hh"
 #include "cpu/base.hh"
@@ -180,6 +182,15 @@
         tc->getIsaPtr()->newPCState(workload->getEntry())));
     panic_if(!new_pc, "Failed create new PCState from ISA pointer");
     tc->pcState(*new_pc);
+
+    // Reset PMP Cfg
+    auto* mmu = dynamic_cast<RiscvISA::MMU*>(tc->getMMUPtr());
+    if (mmu == NULL) {
+        warn("MMU is not Riscv MMU instance, we can't reset PMP");
+        return;
+    }
+    auto* pmp = dynamic_cast<RiscvISA::PMP*>(mmu->getPMP());
+    pmp->pmpReset();
 }

 void
diff --git a/src/arch/riscv/isa.cc b/src/arch/riscv/isa.cc
index 3809c61..03c6418 100644
--- a/src/arch/riscv/isa.cc
+++ b/src/arch/riscv/isa.cc
@@ -544,6 +544,8 @@
                 // qemu seems to update the tables when
                 // pmp addr regs are written (with the assumption
                 // that cfg regs are already written)
+                RegVal res = 0;
+                RegVal old_val = readMiscRegNoEffect(idx);

                 for (int i=0; i < regSize; i++) {

@@ -554,10 +556,15 @@
                     // Form pmp_index using the index i and
                     // PMPCFG register number
                     uint32_t pmp_index = i+(4*(idx-MISCREG_PMPCFG0));
-                    mmu->getPMP()->pmpUpdateCfg(pmp_index,cfg_val);
+                    bool result = mmu->getPMP()->pmpUpdateCfg(pmp_index,cfg_val);
+                    if (result) {
+                        res |= ((RegVal)cfg_val << (8*i));
+                    } else {
+                        res |= (old_val & ((RegVal)0xFF << (8*i)));
+                    }
                 }

-                setMiscRegNoEffect(idx, val);
+                setMiscRegNoEffect(idx, res);
             }
             break;
           case MISCREG_PMPADDR00 ... MISCREG_PMPADDR15:
@@ -568,9 +575,9 @@
                 auto mmu = dynamic_cast<RiscvISA::MMU *>
                               (tc->getMMUPtr());
                 uint32_t pmp_index = idx-MISCREG_PMPADDR00;
-                mmu->getPMP()->pmpUpdateAddr(pmp_index, val);
-
-                setMiscRegNoEffect(idx, val);
+                if (mmu->getPMP()->pmpUpdateAddr(pmp_index, val)) {
+                    setMiscRegNoEffect(idx, val);
+                }
             }
             break;

diff --git a/src/arch/riscv/pmp.cc b/src/arch/riscv/pmp.cc
index 77ef98f..b728347 100644
--- a/src/arch/riscv/pmp.cc
+++ b/src/arch/riscv/pmp.cc
@@ -44,10 +44,13 @@
 namespace gem5
 {

+using namespace RiscvISA;
+
 PMP::PMP(const Params &params) :
     SimObject(params),
     pmpEntries(params.pmp_entries),
-    numRules(0)
+    numRules(0),
+    hasLockEntry(false)
 {
     pmpTable.resize(pmpEntries);
 }
@@ -70,10 +73,7 @@
                 req->getPaddr());
     }

-    // An access should be successful if there are
-    // no rules defined yet or we are in M mode (based
-    // on specs v1.10)
-    if (numRules == 0 || (pmode == RiscvISA::PrivilegeMode::PRV_M))
+    if (numRules == 0)
         return NoFault;

     // match_index will be used to identify the pmp entry
@@ -94,20 +94,19 @@

         if ((match_index > -1)
             && (PMP_OFF != pmpGetAField(pmpTable[match_index].pmpCfg))) {
-            // check the RWX permissions from the pmp entry
-            uint8_t allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+            uint8_t this_cfg = pmpTable[match_index].pmpCfg;

-            // i is the index of pmp table which matched
-            allowed_privs &= pmpTable[match_index].pmpCfg;
-
-            if ((mode == BaseMMU::Mode::Read) &&
-                                        (PMP_READ & allowed_privs)) {
+            if ((pmode == RiscvISA::PrivilegeMode::PRV_M) &&
+                                    (PMP_LOCK & this_cfg) == 0) {
+                return NoFault;
+            } else if ((mode == BaseMMU::Mode::Read) &&
+                                        (PMP_READ & this_cfg)) {
                 return NoFault;
             } else if ((mode == BaseMMU::Mode::Write) &&
-                                        (PMP_WRITE & allowed_privs)) {
+                                        (PMP_WRITE & this_cfg)) {
                 return NoFault;
             } else if ((mode == BaseMMU::Mode::Execute) &&
-                                        (PMP_EXEC & allowed_privs)) {
+                                        (PMP_EXEC & this_cfg)) {
                 return NoFault;
             } else {
                 if (req->hasVaddr()) {
@@ -119,7 +118,9 @@
         }
     }
     // if no entry matched and we are not in M mode return fault
-    if (req->hasVaddr()) {
+    if (pmode == RiscvISA::PrivilegeMode::PRV_M) {
+        return NoFault;
+    } else if (req->hasVaddr()) {
         return createAddrfault(req->getVaddr(), mode);
     } else {
         return createAddrfault(vaddr, mode);
@@ -150,17 +151,19 @@
 }


-void
+bool
 PMP::pmpUpdateCfg(uint32_t pmp_index, uint8_t this_cfg)
 {
     DPRINTF(PMP, "Update pmp config with %u for pmp entry: %u \n",
                                     (unsigned)this_cfg, pmp_index);
-
-    warn_if((PMP_LOCK & this_cfg), "pmp lock feature is not supported.\n");
-
+    if (pmpTable[pmp_index].pmpCfg & PMP_LOCK) {
+        DPRINTF(PMP, "Update pmp entry config %u failed because it "
+                "is locked\n", pmp_index);
+        return false;
+    }
     pmpTable[pmp_index].pmpCfg = this_cfg;
     pmpUpdateRule(pmp_index);
-
+    return true;
 }

 void
@@ -170,6 +173,7 @@
     // pmpaddr/pmpcfg is written

     numRules = 0;
+    hasLockEntry = false;
     Addr prevAddr = 0;

     if (pmp_index >= 1) {
@@ -209,15 +213,43 @@
       if (PMP_OFF != a_field) {
           numRules++;
       }
+      hasLockEntry |= ((pmpTable[i].pmpCfg & PMP_LOCK) != 0);
+    }
+
+    if (hasLockEntry) {
+        DPRINTF(PMP, "Found locked entry\n");
     }
 }

 void
+PMP::pmpReset()
+{
+    for (uint32_t i = 0; i < pmpTable.size(); i++) {
+        pmpTable[i].pmpCfg &= ~(PMP_A_MASK | PMP_LOCK);
+        pmpUpdateRule(i);
+    }
+}
+
+bool
 PMP::pmpUpdateAddr(uint32_t pmp_index, Addr this_addr)
 {
     DPRINTF(PMP, "Update pmp addr %#x for pmp entry %u \n",
                                       this_addr, pmp_index);

+    if (pmpTable[pmp_index].pmpCfg & PMP_LOCK) {
+        DPRINTF(PMP,
+                "Update pmp entry %u failed because the lock bit is set\n",
+                pmp_index);
+        return false;
+    } else if (pmp_index + 1 < pmpTable.size() &&
+               (pmpTable[pmp_index + 1].pmpCfg & PMP_LOCK) &&
+               pmpGetAField(pmpTable[pmp_index + 1].pmpCfg) == PMP_TOR) {
+        DPRINTF(PMP,
+                "Update pmp entry %u failed because entry %u is locked "
+                "and its A field is TOR\n",
+                pmp_index, pmp_index + 1);
+        return false;
+    }
+
     // just writing the raw addr in the pmp table
     // will convert it into a range, once cfg
     // reg is written
@@ -225,6 +257,8 @@
     for (int index = 0; index < pmpEntries; index++) {
         pmpUpdateRule(index);
     }
+
+    return true;
 }

 bool
@@ -247,7 +281,11 @@
     bool cond3 = (mode != BaseMMU::Execute && (status.mprv)
     && (status.mpp != RiscvISA::PrivilegeMode::PRV_M));

-    return (cond1 || cond2 || cond3);
+    // M-mode accesses must also be checked when any pmp entry is locked
+    bool cond4 = (pmode == RiscvISA::PrivilegeMode::PRV_M) &&
+                  hasLockEntry;
+
+    return (cond1 || cond2 || cond3 || cond4);
 }

 AddrRange
diff --git a/src/arch/riscv/pmp.hh b/src/arch/riscv/pmp.hh
index 1509646..8538c79 100644
--- a/src/arch/riscv/pmp.hh
+++ b/src/arch/riscv/pmp.hh
@@ -45,6 +45,9 @@
 namespace gem5
 {

+namespace RiscvISA
+{
+
 /**
  * This class helps to implement RISCV's physical memory
  * protection (pmp) primitive.
@@ -85,12 +88,18 @@
     /** pmpcfg address range execute permission mask */
     const uint8_t PMP_EXEC = 1 << 2;

+    /** pmpcfg A field mask */
+    const uint8_t PMP_A_MASK = 3 << 3;
+
     /** pmpcfg address range locked mask */
     const uint8_t PMP_LOCK = 1 << 7;

     /** variable to keep track of active number of rules any time */
     int numRules;

+    /** variable to keep track of whether any pmp entry is locked */
+    bool hasLockEntry;
+
     /** single pmp entry struct*/
     struct PmpEntry
     {
@@ -127,8 +136,9 @@
      * rule of corresponding pmp entry.
      * @param pmp_index pmp entry index.
      * @param this_cfg value to be written to pmpcfg.
+     * @returns true if the update to pmpicfg succeeded
      */
-    void pmpUpdateCfg(uint32_t pmp_index, uint8_t this_cfg);
+    bool pmpUpdateCfg(uint32_t pmp_index, uint8_t this_cfg);

     /**
      * pmpUpdateAddr updates the pmpaddr for a pmp
@@ -136,8 +146,15 @@
      * rule of corresponding pmp entry.
      * @param pmp_index pmp entry index.
      * @param this_addr value to be written to pmpaddr.
+     * @returns true if the update to pmpaddri succeeded
      */
-    void pmpUpdateAddr(uint32_t pmp_index, Addr this_addr);
+    bool pmpUpdateAddr(uint32_t pmp_index, Addr this_addr);
+
+    /**
+     * pmpReset resets the PMP configuration when a reset signal
+     * is triggered by the CPU.
+     */
+    void pmpReset();

   private:
     /**
@@ -192,6 +209,8 @@

 };

+} // namespace RiscvISA
+
 } // namespace gem5

 #endif // __ARCH_RISCV_PMP_HH__


Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: I3e7c5824d6c05f2ea928ee9ec7714f7271e4c58c
Gerrit-Change-Number: 68057
Gerrit-PatchSet: 1
Gerrit-Owner: Roger Chang <rogerycch...@google.com>
Gerrit-MessageType: newchange