Earl Ou has uploaded this change for review. ( https://gem5-review.googlesource.com/c/public/gem5/+/34696 )

Change subject: base: avoid std::mutex by implementing a spinlock
......................................................................

base: avoid std::mutex by implementing a spinlock

The event queue (eventq) and the SystemC scheduler use std::mutex to
protect critical sections across threads. However, their use cases are
usually low contention (e.g., async I/O), and acquiring a std::mutex
is considered expensive in such cases.

In this change we implement a spinlock instead. A SystemC test shows
about a 12% speed improvement.

Change-Id: I3da946bb74ecd1f00a3009c8c4b8d5245667291e
---
A src/base/lock.hh
M src/sim/eventq.hh
M src/systemc/core/scheduler.cc
M src/systemc/core/scheduler.hh
4 files changed, 18 insertions(+), 6 deletions(-)



diff --git a/src/base/lock.hh b/src/base/lock.hh
new file mode 100644
index 0000000..7167ee7
--- /dev/null
+++ b/src/base/lock.hh
@@ -0,0 +1,9 @@
#ifndef __BASE_LOCK_HH__
#define __BASE_LOCK_HH__

#include <atomic>

/**
 * A minimal spinlock satisfying the BasicLockable requirements
 * (lock() / unlock()), so it works with std::lock_guard and
 * std::unique_lock as a drop-in replacement for std::mutex in short,
 * low-contention critical sections.
 *
 * Waiters use a test-and-test-and-set loop: after a failed acquire
 * attempt they poll the lock with plain loads and only retry the
 * atomic exchange once it looks free, so the cache line is not
 * ping-ponged between cores by repeated RMW operations.
 *
 * Not recursive: a thread that re-acquires the lock it already holds
 * will deadlock. Not fair: acquisition order under contention is
 * unspecified.
 */
class SpinLock
{
  private:
    // true while some thread holds the lock.
    std::atomic<bool> locked{false};

  public:
    void
    lock()
    {
        for (;;) {
            // Try to take the lock. Acquire ordering ensures the
            // critical section's memory accesses are not reordered
            // before this point.
            if (!locked.exchange(true, std::memory_order_acquire))
                return;
            // Spin read-only until the lock appears free; a relaxed
            // load generates no write traffic on the cache line.
            while (locked.load(std::memory_order_relaxed)) {
            }
        }
    }

    void
    unlock()
    {
        // Release ordering publishes the critical section's writes
        // to the next thread that acquires the lock.
        locked.store(false, std::memory_order_release);
    }
};

#endif // __BASE_LOCK_HH__
diff --git a/src/sim/eventq.hh b/src/sim/eventq.hh
index aa54722..107648f 100644
--- a/src/sim/eventq.hh
+++ b/src/sim/eventq.hh
@@ -46,6 +46,7 @@

 #include "base/debug.hh"
 #include "base/flags.hh"
+#include "base/lock.hh"
 #include "base/types.hh"
 #include "debug/Event.hh"
 #include "sim/serialize.hh"
@@ -622,7 +623,7 @@
     Tick _curTick;

     //! Mutex to protect async queue.
-    std::mutex async_queue_mutex;
+    SpinLock async_queue_mutex;

     //! List of events added by other threads to this event queue.
     std::list<Event*> async_queue;
@@ -647,7 +648,7 @@
      * @see EventQueue::lock()
      * @see EventQueue::unlock()
      */
-    std::mutex service_mutex;
+    SpinLock service_mutex;

     //! Insert / remove event from the queue. Should only be called
     //! by thread operating this queue.
diff --git a/src/systemc/core/scheduler.cc b/src/systemc/core/scheduler.cc
index 50a1e6b..b1a5e9f 100644
--- a/src/systemc/core/scheduler.cc
+++ b/src/systemc/core/scheduler.cc
@@ -27,6 +27,8 @@

 #include "systemc/core/scheduler.hh"

+#include <mutex>
+
 #include "base/fiber.hh"
 #include "base/logging.hh"
 #include "sim/eventq.hh"
@@ -257,7 +259,7 @@
 void
 Scheduler::asyncRequestUpdate(Channel *c)
 {
-    std::lock_guard<std::mutex> lock(asyncListMutex);
+    std::lock_guard<SpinLock> lock(asyncListMutex);
     asyncUpdateList.pushLast(c);
 }

@@ -326,7 +328,7 @@
 {
     status(StatusUpdate);
     {
-        std::lock_guard<std::mutex> lock(asyncListMutex);
+        std::lock_guard<SpinLock> lock(asyncListMutex);
         Channel *channel;
         while ((channel = asyncUpdateList.getNext()) != nullptr)
             updateList.pushLast(channel);
diff --git a/src/systemc/core/scheduler.hh b/src/systemc/core/scheduler.hh
index 273faf7..38ddc33 100644
--- a/src/systemc/core/scheduler.hh
+++ b/src/systemc/core/scheduler.hh
@@ -28,10 +28,10 @@
 #ifndef __SYSTEMC_CORE_SCHEDULER_HH__
 #define __SYSTEMC_CORE_SCHEDULER_HH__

+#include <atomic>
 #include <functional>
 #include <list>
 #include <map>
-#include <mutex>
 #include <set>
 #include <vector>

@@ -528,7 +528,7 @@
     ChannelList updateList;

     ChannelList asyncUpdateList;
-    std::mutex asyncListMutex;
+    SpinLock asyncListMutex;

     std::map<::Event *, Tick> eventsToSchedule;


--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/34696
To unsubscribe, or for help writing mail filters, visit https://gem5-review.googlesource.com/settings

Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: I3da946bb74ecd1f00a3009c8c4b8d5245667291e
Gerrit-Change-Number: 34696
Gerrit-PatchSet: 1
Gerrit-Owner: Earl Ou <[email protected]>
Gerrit-MessageType: newchange
_______________________________________________
gem5-dev mailing list -- [email protected]
To unsubscribe send an email to [email protected]
%(web_page_url)slistinfo%(cgiext)s/%(_internal_name)s

Reply via email to