Hi Andreas,

I've created a patch on top of the trunk.
Could you please help me figure out what the problem is?

====patch start===

diff -r e2c43045a81b configs/common/CacheConfig.py
--- a/configs/common/CacheConfig.py    Tue Sep 09 04:36:43 2014 -0400
+++ b/configs/common/CacheConfig.py    Fri Sep 12 00:10:46 2014 +0800
@@ -1,6 +1,6 @@
 # Copyright (c) 2012-2013 ARM Limited
 # All rights reserved
-#
+#
 # The license below extends only to copyright in the software and shall
 # not be construed as granting a license to any other intellectual
 # property including but not limited to intellectual property relating
@@ -9,7 +9,7 @@
 # terms below provided that you ensure that this notice is replicated
 # unmodified and in its entirety in all distributions of the software,
 # modified or unmodified, in source code or in binary form.
-#
+#
 # Copyright (c) 2010 Advanced Micro Devices, Inc.
 # All rights reserved.
 #
@@ -67,14 +67,21 @@
         # are not connected using addTwoLevelCacheHierarchy. Use the
         # same clock as the CPUs, and set the L1-to-L2 bus width to 32
         # bytes (256 bits).
-        system.l2 = l2_cache_class(clk_domain=system.cpu_clk_domain,
-                                   size=options.l2_size,
-                                   assoc=options.l2_assoc)
+        l2s = [l2_cache_class(clk_domain=system.cpu_clk_domain,
+                              size=options.l2_size,
+                              assoc=options.l2_assoc)
+               for x in range(options.num_cpus)]
 
-        system.tol2bus = CoherentBus(clk_domain = system.cpu_clk_domain,
-                                     width = 32)
-        system.l2.cpu_side = system.tol2bus.master
-        system.l2.mem_side = system.membus.slave
+        tol2buses = [CoherentBus(clk_domain=system.cpu_clk_domain,
+                                 width=32)
+                     for x in range(options.num_cpus)]
+
+        for i, l2 in enumerate(l2s):
+            l2.cpu_side = tol2buses[i].master
+            l2.mem_side = system.membuses[i].slave
+
+        system.l2s = l2s
+        system.tol2buses = tol2buses
 
     for i in xrange(options.num_cpus):
         if options.caches:
@@ -93,8 +100,9 @@
                 system.cpu[i].addPrivateSplitL1Caches(icache, dcache)
         system.cpu[i].createInterruptController()
         if options.l2cache:
-            system.cpu[i].connectAllPorts(system.tol2bus, system.membus)
+            system.cpu[i].connectAllPorts(system.tol2buses[i],
+                                          system.membuses[i])
         else:
-            system.cpu[i].connectAllPorts(system.membus)
+            system.cpu[i].connectAllPorts(system.membuses[0])
 
     return system
diff -r e2c43045a81b configs/common/Caches.py
--- a/configs/common/Caches.py    Tue Sep 09 04:36:43 2014 -0400
+++ b/configs/common/Caches.py    Fri Sep 12 00:10:46 2014 +0800
@@ -72,6 +72,13 @@
     forward_snoops = False
     is_top_level = True
 
+class NUMACache(BaseCache):
+    assoc = 8
+    hit_latency = 50
+    response_latency = 50
+    mshrs = 20
+    tgts_per_mshr = 12
+
 class PageTableWalkerCache(BaseCache):
     assoc = 2
     hit_latency = 2
diff -r e2c43045a81b configs/common/FSConfig.py
--- a/configs/common/FSConfig.py    Tue Sep 09 04:36:43 2014 -0400
+++ b/configs/common/FSConfig.py    Fri Sep 12 00:10:46 2014 +0800
@@ -341,13 +341,13 @@
     interrupts_address_space_base = 0xa000000000000000
     APIC_range_size = 1 << 12;
 
-    x86_sys.membus = MemBus()
+    x86_sys.membuses = [MemBus() for x in range(numCPUs)]
 
     # North Bridge
     x86_sys.iobus = NoncoherentBus()
     x86_sys.bridge = Bridge(delay='50ns')
     x86_sys.bridge.master = x86_sys.iobus.slave
-    x86_sys.bridge.slave = x86_sys.membus.master
+    x86_sys.bridge.slave = x86_sys.membuses[0].master
     # Allow the bridge to pass through the IO APIC (two pages),
     # everything in the IO address range up to the local APIC, and
     # then the entire PCI address space and beyond
@@ -366,7 +366,7 @@
     # the local APIC (two pages)
     x86_sys.apicbridge = Bridge(delay='50ns')
     x86_sys.apicbridge.slave = x86_sys.iobus.master
-    x86_sys.apicbridge.master = x86_sys.membus.slave
+    x86_sys.apicbridge.master = x86_sys.membuses[0].slave
     x86_sys.apicbridge.ranges = [AddrRange(interrupts_address_space_base,
                                            interrupts_address_space_base +
                                            numCPUs * APIC_range_size
@@ -375,7 +375,7 @@
     # connect the io bus
     x86_sys.pc.attachIO(x86_sys.iobus)
 
-    x86_sys.system_port = x86_sys.membus.slave
+    x86_sys.system_port = x86_sys.membuses[0].slave
 
 def connectX86RubySystem(x86_sys):
     # North Bridge
diff -r e2c43045a81b configs/common/MemConfig.py
--- a/configs/common/MemConfig.py    Tue Sep 09 04:36:43 2014 -0400
+++ b/configs/common/MemConfig.py    Fri Sep 12 00:10:46 2014 +0800
@@ -153,7 +153,13 @@
     # For every range (most systems will only have one), create an
     # array of controllers and set their parameters to match their
     # address mapping in the case of a DRAM
-    for r in system.mem_ranges:
+
+    AddrRange = m5.objects.AddrRange
+    numa_mem_ranges = [AddrRange('0MB', size='128MB'),
+                       AddrRange('128MB', size='128MB'),
+                       AddrRange('256MB', size='128MB'),
+                       AddrRange('384MB', size='128MB')]
+    for r in numa_mem_ranges:
         for i in xrange(nbr_mem_ctrls):
             # Create an instance so we can figure out the address
             # mapping and row-buffer size
@@ -191,4 +197,4 @@
 
     # Connect the controllers to the membus
     for i in xrange(len(system.mem_ctrls)):
-        system.mem_ctrls[i].port = system.membus.master
+        system.mem_ctrls[i].port = system.membuses[i].master
diff -r e2c43045a81b configs/example/fs.py
--- a/configs/example/fs.py    Tue Sep 09 04:36:43 2014 -0400
+++ b/configs/example/fs.py    Fri Sep 12 00:10:46 2014 +0800
@@ -60,6 +60,7 @@
 import Simulation
 import CacheConfig
 import MemConfig
+import NUMAConfig
 from Caches import *
 import Options
 
@@ -177,7 +178,7 @@
         else:
             test_sys.iobridge = Bridge(delay='50ns', ranges = test_sys.mem_ranges)
             test_sys.iobridge.slave = test_sys.iobus.master
-            test_sys.iobridge.master = test_sys.membus.slave
+            test_sys.iobridge.master = test_sys.membuses[0].slave
 
         # Sanity check
         if options.fastmem:
@@ -195,6 +196,7 @@
 
         CacheConfig.config_cache(options, test_sys)
         MemConfig.config_mem(options, test_sys)
+        NUMAConfig.config_numa(options, test_sys)
 
     return test_sys
 
====patch end===

Regards,
Chen Houwu

On Fri, 5 Sep 2014 18:58:12 -0300, Matheus Alcântara Souza via gem5-users wrote:
> Hi all,
>
> About the ccNUMA implementation, Chen pointed out a problem when
> running his model. I tried to implement it too, and a segfault also
> occurs.
>
> Using --debug-flags=Cache, there seems to be a loop until the segfault
> appears:
>
> .......it repeats........
> 0: system.numa_caches03: functional WriteReq 200000
> 0: system.l2s1: functional WriteReq 200000
> 0: system.numa_caches00: functional WriteReq 200000
> 0: system.iocache: functional WriteReq 200000
> 0: system.l2s0: functional WriteReq 200000
> 0: system.numa_caches03: functional WriteReq 200000
> 0: system.l2s1: functional WriteReq 200000
> 0: system.numa_caches00: functional WriteReq 200000
> 0: system.iocache: functional WriteReq 200000
> 0: system.l2s0: functional WriteReq 200000
> ..........it repeats...........
>
>
> Any idea?
>
>
> ---------
>
>
> Hi Chen,
>
> The only issue I can spot at this point is the "is_top_level" being set on
> the NUMACache. As there are caches above these NUMACaches, the value should
> be false. For the rest I don't dare say. Is there any chance you could turn
> this into a patch that applies on top of the latest trunk?
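>
> (For illustration, a minimal sketch of that change, reusing the NUMACache
> class and parameter values from Chen's diff; this is an untested sketch,
> not a reviewed patch:)
>
>     from m5.objects import BaseCache   # as in configs/common/Caches.py
>
>     class NUMACache(BaseCache):
>         assoc = 8
>         hit_latency = 50
>         response_latency = 50
>         mshrs = 20
>         tgts_per_mshr = 12
>         # the per-CPU L1s and per-node L2s sit above these NUMA caches,
>         # so they are not the top level of the cache hierarchy
>         is_top_level = False
>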
> Thanks,
>
> Andreas
>
> From: Chen Houwu via gem5-users <gem5-users@gem5.org>
> Reply-To: Chen Houwu <chenho...@gmail.com>, gem5 users mailing list
> <gem5-users@gem5.org>
> Date: Saturday, August 9, 2014 at 3:42 AM
> To: "gem5-users@gem5.org" <gem5-users@gem5.org>
> Subject: Re: [gem5-users] help : cc-NUMA in gem5
>
> Hi Andreas,
> I just followed your instructions to build the NUMA config,
> but it does not work:
> gem5 starts to run and quits immediately.
> Here is the debug output:
>
> =======debug output begin=======
> command line: /vagrant/gem5/build/X86/gem5.opt gem5/configs/example/fs.py 
> --num-cpus=4 --disk-image=x86root-parsec.img --kernel=vmlinux
> Global frequency set at 1000000000000 ticks per second
> info: kernel located at: /vagrant/binaries/vmlinux
>       0: rtc: Real-time clock set to Sun Jan  1 00:00:00 2012
> Listening for com_1 connection on port 3456
> warn: Reading current count from inactive timer.
> 0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000
> 0: system.remote_gdb.listener: listening for remote gdb #1 on port 7001
> 0: system.remote_gdb.listener: listening for remote gdb #2 on port 7002
> 0: system.remote_gdb.listener: listening for remote gdb #3 on port 7003
>
> Program received signal SIGSEGV, Segmentation fault.
> 0x0000000000a845f2 in PacketQueue::checkFunctional (this=0x28aac98, 
> pkt=pkt@entry=0x7fffffffd250) at build/X86/mem/packet_queue.cc:71
> 71      {
> (gdb) list
> 66          sendDeferredPacket();
> 67      }
> =====debug output end======
>
>
> I am not proficient enough to debug this problem,
> so I would appreciate your help in pointing out what's wrong with my configs.
>
>
> I just created:
> A. a 4-CPU NUMA system,
> B. each CPU connects to a membus and then a DRAM controller,
> C. each DRAM is 128MB,
> D. every two membuses are connected to each other by a Cache (in both directions),
> E. with the address range set to the slave membus's address range,
> F. IO devices are connected to the first node.
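>
> (A condensed sketch of items D and E, assuming node indices src and dst plus
> the per-node membuses and mem_ctrls created in the diff below; the actual
> change is the NUMAConfig.py hunk further down, so this only restates its core
> idea:)
>
>     from Caches import NUMACache   # defined in the Caches.py hunk below
>
>     # connect node src to node dst through a cache covering dst's DRAM range
>     cache = NUMACache(size="256kB",
>                       addr_ranges=[system.mem_ctrls[dst].range])
>     cache.cpu_side = system.membuses[src].master
>     cache.mem_side = system.membuses[dst].slave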
>
> Here is the modification I did for the NUMA model.
>
>
> ========diff start===============
> diff --git a/configs/common/CacheConfig.py b/configs/common/CacheConfig.py
> index b467b16..85f68b5 100644
> --- a/configs/common/CacheConfig.py
> +++ b/configs/common/CacheConfig.py
> @@ -67,14 +67,21 @@ def config_cache(options, system):
>          # are not connected using addTwoLevelCacheHierarchy. Use the
>          # same clock as the CPUs, and set the L1-to-L2 bus width to 32
>          # bytes (256 bits).
> -        system.l2 = l2_cache_class(clk_domain=system.cpu_clk_domain,
> -                                   size=options.l2_size,
> -                                   assoc=options.l2_assoc)
> +        l2s = [l2_cache_class(clk_domain=system.cpu_clk_domain,
> +                              size=options.l2_size,
> +                              assoc=options.l2_assoc)
> +               for x in range(options.num_cpus)]
> +
> +        tol2buses = [CoherentBus(clk_domain=system.cpu_clk_domain,
> +                                 width=32)
> +                     for x in range(options.num_cpus)]
> +
> +        for i, l2 in enumerate(l2s):
> +            l2.cpu_side = tol2buses[i].master
> +            l2.mem_side = system.membuses[i].slave
>
> -        system.tol2bus = CoherentBus(clk_domain = system.cpu_clk_domain,
> -                                     width = 32)
> -        system.l2.cpu_side = system.tol2bus.master
> -        system.l2.mem_side = system.membus.slave
> +        system.l2s = l2s
> +        system.tol2buses = tol2buses
>
>      for i in xrange(options.num_cpus):
>          if options.caches:
> @@ -93,8 +100,9 @@ def config_cache(options, system):
>                  system.cpu[i].addPrivateSplitL1Caches(icache, dcache)
>          system.cpu[i].createInterruptController()
>          if options.l2cache:
> -            system.cpu[i].connectAllPorts(system.tol2bus, system.membus)
> +            system.cpu[i].connectAllPorts(system.tol2buses[i],
> +                                          system.membuses[i])
>          else:
> -            system.cpu[i].connectAllPorts(system.membus)
> +            system.cpu[i].connectAllPorts(system.membuses[i])
>
>      return system
> diff --git a/configs/common/Caches.py b/configs/common/Caches.py
> index 9f7ac7a..0c1f7f1 100644
> --- a/configs/common/Caches.py
> +++ b/configs/common/Caches.py
> @@ -72,6 +72,14 @@ class IOCache(BaseCache):
>      forward_snoops = False
>      is_top_level = True
>
> +class NUMACache(BaseCache):
> +    assoc = 8
> +    hit_latency = 50
> +    response_latency = 50
> +    mshrs = 20
> +    tgts_per_mshr = 12
> +    is_top_level = True
> +
>  class PageTableWalkerCache(BaseCache):
>      assoc = 2
>      hit_latency = 2
> diff --git a/configs/common/FSConfig.py b/configs/common/FSConfig.py
> index dc24adf..79518da 100644
> --- a/configs/common/FSConfig.py
> +++ b/configs/common/FSConfig.py
> @@ -331,13 +331,13 @@ def connectX86ClassicSystem(x86_sys, numCPUs):
>      interrupts_address_space_base = 0xa000000000000000
>      APIC_range_size = 1 << 12;
>
> -    x86_sys.membus = MemBus()
> +    x86_sys.membuses = [MemBus() for x in range(numCPUs)]
>
>      # North Bridge
>      x86_sys.iobus = NoncoherentBus()
>      x86_sys.bridge = Bridge(delay='50ns')
>      x86_sys.bridge.master = x86_sys.iobus.slave
> -    x86_sys.bridge.slave = x86_sys.membus.master
> +    x86_sys.bridge.slave = x86_sys.membuses[0].master
>      # Allow the bridge to pass through the IO APIC (two pages),
>      # everything in the IO address range up to the local APIC, and
>      # then the entire PCI address space and beyond
> @@ -356,7 +356,7 @@ def connectX86ClassicSystem(x86_sys, numCPUs):
>      # the local APIC (two pages)
>      x86_sys.apicbridge = Bridge(delay='50ns')
>      x86_sys.apicbridge.slave = x86_sys.iobus.master
> -    x86_sys.apicbridge.master = x86_sys.membus.slave
> +    x86_sys.apicbridge.master = x86_sys.membuses[0].slave
>      x86_sys.apicbridge.ranges = [AddrRange(interrupts_address_space_base,
>                                             interrupts_address_space_base +
>                                             numCPUs * APIC_range_size
> @@ -365,7 +365,7 @@ def connectX86ClassicSystem(x86_sys, numCPUs):
>      # connect the io bus
>      x86_sys.pc.attachIO(x86_sys.iobus)
>
> -    x86_sys.system_port = x86_sys.membus.slave
> +    x86_sys.system_port = x86_sys.membuses[0].slave
>
>  def connectX86RubySystem(x86_sys):
>      # North Bridge
>
> diff --git a/configs/common/MemConfig.py b/configs/common/MemConfig.py
> index e954407..e8c623e 100644
> --- a/configs/common/MemConfig.py
> +++ b/configs/common/MemConfig.py
> @@ -153,7 +153,13 @@ def config_mem(options, system):
>      # For every range (most systems will only have one), create an
>      # array of controllers and set their parameters to match their
>      # address mapping in the case of a DRAM
> -    for r in system.mem_ranges:
> +
> +    AddrRange = m5.objects.AddrRange
> +    numa_mem_ranges = [AddrRange('0MB', size='128MB'),
> +                       AddrRange('128MB', size='128MB'),
> +                       AddrRange('256MB', size='128MB'),
> +                       AddrRange('384MB', size='128MB')]
> +    for r in numa_mem_ranges:
>          for i in xrange(nbr_mem_ctrls):
>              # Create an instance so we can figure out the address
>              # mapping and row-buffer size
> @@ -191,4 +197,4 @@ def config_mem(options, system):
>
>      # Connect the controllers to the membus
>      for i in xrange(len(system.mem_ctrls)):
> -        system.mem_ctrls[i].port = system.membus.master
> +        system.mem_ctrls[i].port = system.membuses[i].master
> diff --git a/configs/common/NUMAConfig.py b/configs/common/NUMAConfig.py
> new file mode 100644
> index 0000000..71ea0a5
> --- /dev/null
> +++ b/configs/common/NUMAConfig.py
> @@ -0,0 +1,86 @@
> +import itertools
> +
> +from m5.objects import *
> +from Benchmarks import *
> +from m5.util import *
> +from Caches import *
> +
> +def config_numa(options, system):
> +    link_nodes(options, system)
> +
> +def link_nodes(options, system):
> +    numa_caches = []
> +    for src, dst in itertools.permutations(range(options.num_cpus), r=2):
> +        cache = NUMACache(size="256kB",
> +                          addr_ranges=[system.mem_ctrls[dst].range])
> +        numa_caches.append(cache)
> +        cache.cpu_side = system.membuses[src].master
> +        cache.mem_side = system.membuses[dst].slave
> +    system.numa_caches = numa_caches
>
> diff --git a/configs/example/fs.py b/configs/example/fs.py
> index 5847482..de2b0eb 100644
> --- a/configs/example/fs.py
> +++ b/configs/example/fs.py
> @@ -60,6 +60,7 @@ from Benchmarks import *
>  import Simulation
>  import CacheConfig
>  import MemConfig
> +import NUMAConfig
>  from Caches import *
>  import Options
>
> @@ -177,7 +178,7 @@ def build_test_system(np):
>          else:
>              test_sys.iobridge = Bridge(delay='50ns', ranges = 
> test_sys.mem_ranges)
>              test_sys.iobridge.slave = test_sys.iobus.master
> -            test_sys.iobridge.master = test_sys.membus.slave
> +            test_sys.iobridge.master = test_sys.membuses[0].slave
>
>          # Sanity check
>          if options.fastmem:
> @@ -195,6 +196,7 @@ def build_test_system(np):
>
>          CacheConfig.config_cache(options, test_sys)
>          MemConfig.config_mem(options, test_sys)
> +        NUMAConfig.config_numa(options, test_sys)
>
>      return test_sys
> ========diff end===============
>
> Regards,
> Chen Houwu
>
> On Mon, 7 Apr 2014 08:32:55 +0100, Andreas Hansson
> <andreas.hans...@arm.com> wrote:
> Hi Faris,
>
> I do not think there are any existing examples out there. In essence, this is
> what I would suggest (and believe should work):
>
> 1. Change an existing script to create two completely separate CC-UMA
>    "chiplets" with CPU clusters, along with their L2/L3, membus and
>    (multi-channel) DRAM controller.
> 2. Connect these two blobs with a "numa cache" in each direction, from membus
>    to membus. Configured right, this will allow the CPUs in one of the chiplets
>    to talk to the DRAMs in the other in a coherent fashion.
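>
> (A rough sketch of step 2 under stated assumptions: the two chiplets have
> already been built, with membuses system.membus0/system.membus1 and DRAM
> address ranges range0/range1, and the glue cache reuses the NUMACache
> parameter class that appears elsewhere in this thread; all names here are
> illustrative, not from an existing script:)
>
>     # one "numa cache" per direction, each covering the remote DRAM range,
>     # so accesses to remote memory cross the chiplet boundary coherently
>     system.glue_0_to_1 = NUMACache(size='256kB', addr_ranges=[range1])
>     system.glue_0_to_1.cpu_side = system.membus0.master
>     system.glue_0_to_1.mem_side = system.membus1.slave
>
>     system.glue_1_to_0 = NUMACache(size='256kB', addr_ranges=[range0])
>     system.glue_1_to_0.cpu_side = system.membus1.master
>     system.glue_1_to_0.mem_side = system.membus0.slave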
>
> I hope that provides enough information to get you started.
>
> Andreas
>
> From: faris priadi <farispri...@gmail.com>
> Reply-To: gem5 users mailing list <gem5-users@gem5.org>
> Date: Sunday, 6 April 2014 09:47
> To: gem5 users mailing list <gem5-users@gem5.org>
> Subject: Re: [gem5-users] help : cc-NUMA in gem5
>
> Hi,
> I tried to configure NUMA in gem5, but I'm still stuck, specifically on
> creating a separate memory controller for each CPU, because everything I
> found on the internet and in forums was about multi-channel memory in gem5.
> Isn't configuring multiple channels similar to adding a separate memory
> controller for each CPU in gem5? Thanks.
>
>
> On Tue, Apr 1, 2014 at 11:57 PM, faris priadi <farispri...@gmail.com> wrote:
> Hi Andreas,
>
> OK, I will try it first and try to understand how to implement my model in
> gem5. Anyway, I have tried to increase the memory size to 4GB with --mem-size
> in ALPHA full system, but the following error happened:
>
> 1146167500: system.cpu00.break_event: break event panic triggered
> fatal: Unable to find destination for addr 0xfffff00188 on bus system.piobus
>  @ cycle 14723200500
> [findPort:build/ALPHA_FS/mem/bus.cc, line 353]
> Memory Usage: 5619064 KBytes
>
> Do you know what this error is? Can you help me solve it? Thanks.
>
>
> On Tue, Apr 1, 2014 at 2:48 PM, Andreas Hansson <andreas.hans...@arm.com> wrote:
> Hi Faris,
>
> Not at all. You simply assemble the existing building blocks in a slightly 
> different manner. You should be able to do this entirely through the Python 
> scripts.
>
> Andreas
>
> From: faris priadi <farispri...@gmail.com>
> Reply-To: gem5 users mailing list <gem5-users@gem5.org>
> Date: Tuesday, 1 April 2014 03:13
>
> To: gem5 users mailing list <gem5-users@gem5.org>
> Subject: Re: [gem5-users] help : cc-NUMA in gem5
>
> Thank you, Andreas, for the quick response. Actually, I am still a little bit
> confused. Do I also have to modify .cc/.hh files manually to create a NUMA
> model?
>
>
> On Tue, Apr 1, 2014 at 1:46 AM, Andreas Hansson <andreas.hans...@arm.com> wrote:
> Hi Faris,
>
> The short answer is yes. You will have to manually adapt se.py or fs.py based 
> on your needs. You could, for example, create two clusters of CPUs, each with 
> their own LLC and memory controllers (and memory ranges), and then connect 
> their membus instances with a "glue" cache.
>
> Andreas
>
> From: faris priadi <farispri...@gmail.com>
> Reply-To: gem5 users mailing list <gem5-users@gem5.org>
> Date: Monday, 31 March 2014 18:58
> To: "gem5-users@gem5.org" <gem5-users@gem5.org>
> Subject: [gem5-users] help : cc-NUMA in gem5
>
> Hello everyone,
> I have a final project about cc-NUMA and I am now trying to use gem5, but I
> need help configuring NUMA in gem5. Can gem5 be used to simulate a cc-NUMA
> system?
>
> --
> Best Regards,
> Faris Priadi, Amd.
>
> 082121325532
> farispri...@gmail.com
>
_______________________________________________
gem5-users mailing list
gem5-users@gem5.org
http://m5sim.org/cgi-bin/mailman/listinfo/gem5-users
