Hello,

I have implemented initial support for calling mlock() in a non-root
process; the patch is attached.  I had to introduce a new RPC, since
the existing vm_wire RPC operates on the privileged host port.  For now
it allows 64KiB of mlocked memory per task (the default Linux value).

Could somebody review it? (in particular the name of the new RPC)

Samuel
diff --git a/include/mach/mach4.defs b/include/mach/mach4.defs
index 114edf4..4ecc0f5 100644
--- a/include/mach/mach4.defs
+++ b/include/mach/mach4.defs
@@ -110,3 +110,18 @@ routine memory_object_create_proxy(
                start           : vm_offset_array_t;
                len             : vm_offset_array_t;
                out proxy       : mach_port_t);
+
+
+/*
+ *     Specify that the range of the virtual address space
+ *     of the task must not cause page faults for
+ *     the indicated accesses.
+ *
+ *     [ To unwire the pages, specify VM_PROT_NONE. ]
+ */
+routine        vm_task_wire(
+               task            : vm_task_t;
+               address         : vm_address_t;
+               size            : vm_size_t;
+               access          : vm_prot_t);
+
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 6b13724..ae3ce21 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -208,6 +208,7 @@ void vm_map_setup(
        rbtree_init(&map->hdr.tree);
 
        map->size = 0;
+       map->user_wired = 0;
        map->ref_count = 1;
        map->pmap = pmap;
        map->min_offset = min;
@@ -1409,7 +1410,10 @@ kern_return_t vm_map_pageable_common(
 
                    if (user_wire) {
                        if (--(entry->user_wired_count) == 0)
+                       {
+                           map->user_wired -= entry->vme_end - entry->vme_start;
                            entry->wired_count--;
+                       }
                    }
                    else {
                        entry->wired_count--;
@@ -1486,7 +1490,10 @@ kern_return_t vm_map_pageable_common(
 
                    if (user_wire) {
                        if ((entry->user_wired_count)++ == 0)
+                       {
+                           map->user_wired += entry->vme_end - entry->vme_start;
                            entry->wired_count++;
+                       }
                    }
                    else {
                        entry->wired_count++;
@@ -1512,6 +1519,9 @@ kern_return_t vm_map_pageable_common(
                                (entry->vme_end > start)) {
                                    if (user_wire) {
                                        if (--(entry->user_wired_count) == 0)
+                                       {
+                                           map->user_wired -= entry->vme_end - entry->vme_start;
                                            entry->wired_count--;
+                                       }
                                    }
                                    else {
@@ -1627,6 +1635,8 @@ void vm_map_entry_delete(
            if (entry->wired_count != 0) {
                vm_fault_unwire(map, entry);
                entry->wired_count = 0;
+               if (entry->user_wired_count)
+                   map->user_wired -= entry->vme_end - entry->vme_start;
                entry->user_wired_count = 0;
            }
 
@@ -2274,6 +2284,8 @@ start_pass_1:
                        entry->offset = copy_entry->offset;
                        entry->needs_copy = copy_entry->needs_copy;
                        entry->wired_count = 0;
+                       if (entry->user_wired_count)
+                           dst_map->user_wired -= entry->vme_end - entry->vme_start;
                        entry->user_wired_count = 0;
 
                        vm_map_copy_entry_unlink(copy, copy_entry);
@@ -2869,6 +2881,7 @@ create_object:
 
        if (must_wire) {
                entry->wired_count = 1;
+               dst_map->user_wired += entry->vme_end - entry->vme_start;
                entry->user_wired_count = 1;
        } else {
                entry->wired_count = 0;
@@ -3954,6 +3967,8 @@ retry:
 
                                        assert(src_entry->wired_count > 0);
                                        src_entry->wired_count = 0;
+                                       if (src_entry->user_wired_count)
+                                           src_map->user_wired -= src_entry->vme_end - src_entry->vme_start;
                                        src_entry->user_wired_count = 0;
                                        unwire_end = src_entry->vme_end;
                                        pmap_pageable(vm_map_pmap(src_map),
diff --git a/vm/vm_map.h b/vm/vm_map.h
index fc7730a..9b31f90 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -170,6 +170,7 @@ struct vm_map {
 #define max_offset             hdr.links.end   /* end of range */
        pmap_t                  pmap;           /* Physical map */
        vm_size_t               size;           /* virtual size */
+       vm_size_t               user_wired;     /* wired by user size */
        int                     ref_count;      /* Reference count */
        decl_simple_lock_data(, ref_lock)       /* Lock for ref_count field */
        vm_map_entry_t          hint;           /* hint for quick lookups */
diff --git a/vm/vm_user.c b/vm/vm_user.c
index f7c87cc..e62dc42 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -431,3 +431,37 @@ kern_return_t vm_wire(host, map, start, size, access)
                                    round_page(start+size),
                                    access);
 }
+
+/*
+ *     Specify that the range of the virtual address space
+ *     of the target task must not cause page faults for
+ *     the indicated accesses.
+ *
+ *     [ To unwire the pages, specify VM_PROT_NONE. ]
+ */
+kern_return_t vm_task_wire(task, start, size, access)
+       vm_map_t                task;
+       vm_offset_t             start;
+       vm_size_t               size;
+       vm_prot_t               access;
+{
+       vm_offset_t             end;
+
+       if (task == VM_MAP_NULL)
+               return KERN_INVALID_TASK;
+
+       if (access & ~VM_PROT_ALL)
+               return KERN_INVALID_ARGUMENT;
+
+       /*Check if range includes projected buffer;
+         user is not allowed direct manipulation in that case*/
+       if (projected_buffer_in_range(task, start, start+size))
+               return(KERN_INVALID_ARGUMENT);
+
+       /* Round to page boundaries before the accounting check, so the
+          limit applies to what is actually wired.  TODO: make it tunable */
+       end = round_page(start + size);
+       start = trunc_page(start);
+       if (access != VM_PROT_NONE && task->user_wired + (end - start) > 65536)
+               return KERN_NO_ACCESS;
+       return vm_map_pageable_user(task, start, end, access);
diff --git a/sysdeps/mach/hurd/mlock.c b/sysdeps/mach/hurd/mlock.c
index 14c311c..927c9e8 100644
--- a/sysdeps/mach/hurd/mlock.c
+++ b/sysdeps/mach/hurd/mlock.c
@@ -32,12 +32,18 @@ mlock (const void *addr, size_t len)
   vm_address_t page;
   error_t err;
 
+  page = trunc_page ((vm_address_t) addr);
+  len = round_page ((vm_address_t) addr + len) - page;
+
   err = __get_privileged_ports (&hostpriv, NULL);
   if (err)
-    return __hurd_fail (EPERM);
+    {
+      err = __vm_task_wire (__mach_task_self (), page, len, VM_PROT_READ);
+      if (err == EMIG_BAD_ID)
+       err = EPERM;
+      return err ? __hurd_fail (err) : 0;
+    }
 
-  page = trunc_page ((vm_address_t) addr);
-  len = round_page ((vm_address_t) addr + len) - page;
   err = __vm_wire (hostpriv, __mach_task_self (), page, len,
                   VM_PROT_READ);
   __mach_port_deallocate (__mach_task_self (), hostpriv);
diff --git a/sysdeps/mach/hurd/munlock.c b/sysdeps/mach/hurd/munlock.c
index c03af90..5a47f1d 100644
--- a/sysdeps/mach/hurd/munlock.c
+++ b/sysdeps/mach/hurd/munlock.c
@@ -31,12 +31,18 @@ munlock (const void *addr, size_t len)
   vm_address_t page;
   error_t err;
 
+  page = trunc_page ((vm_address_t) addr);
+  len = round_page ((vm_address_t) addr + len) - page;
+
   err = __get_privileged_ports (&hostpriv, NULL);
   if (err)
-    return __hurd_fail (EPERM);
+    {
+      err = __vm_task_wire (__mach_task_self (), page, len, VM_PROT_NONE);
+      if (err == EMIG_BAD_ID)
+       err = EPERM;
+      return err ? __hurd_fail (err) : 0;
+    }
 
-  page = trunc_page ((vm_address_t) addr);
-  len = round_page ((vm_address_t) addr + len) - page;
   err = __vm_wire (hostpriv, __mach_task_self (), page, len, VM_PROT_NONE);
   __mach_port_deallocate (__mach_task_self (), hostpriv);
 

Reply via email to