Justus Winter, on Wed 08 Jul 2015 16:22:28 +0200, wrote:
> Alternatively, you could re-purpose the existing RPC `vm_wire',
> changing the type of its first argument from `host_priv_t' to `host_t'
> (this is backwards compatible as the privileged host control port is
> also a host port), and changing the behavior slightly depending on
> whether the user passed the priv port or a normal host port.

Ok, so it'd look like this? (I can't reuse the intran, since the
host_t obtained is the same in the non-priv and priv cases.)
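
For reference, the intran in question, convert_port_to_host() in
kern/ipc_host.c, goes roughly like this (quoting from memory, not verbatim
from the tree); it hands back the same host structure for IKOT_HOST and
IKOT_HOST_PRIV alike, so the defs below have to pass a plain mach_port_t and
let vm_wire check the kotype itself:

/* Rough sketch of the existing host_t intran, from memory.  */
#include <ipc/ipc_port.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>

host_t
convert_port_to_host(ipc_port_t port)
{
	host_t host = HOST_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);
		if (ip_active(port) &&
		    (ip_kotype(port) == IKOT_HOST ||
		     ip_kotype(port) == IKOT_HOST_PRIV))
			/* Same kernel object in both cases.  */
			host = (host_t) port->ip_kobject;
		ip_unlock(port);
	}
	return host;
}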

Samuel
diff --git a/doc/mach.texi b/doc/mach.texi
index 59872c9..2d127fb 100644
--- a/doc/mach.texi
+++ b/doc/mach.texi
@@ -3241,14 +3241,15 @@ successfully set and @code{KERN_INVALID_ADDRESS} if an invalid or
 non-allocated address was specified.
 @end deftypefun
 
-@deftypefun kern_return_t vm_wire (@w{host_priv_t @var{host_priv}}, @w{vm_task_t @var{target_task}}, @w{vm_address_t @var{address}}, @w{vm_size_t @var{size}}, @w{vm_prot_t @var{access}})
-The function @code{vm_wire} allows privileged applications to control
-memory pageability.  @var{host_priv} is the privileged host port for the
+@deftypefun kern_return_t vm_wire (@w{host_t @var{host}}, @w{vm_task_t @var{target_task}}, @w{vm_address_t @var{address}}, @w{vm_size_t @var{size}}, @w{vm_prot_t @var{access}})
+The function @code{vm_wire} allows applications to control
+memory pageability.  @var{host} is the host port for the
 host on which @var{target_task} resides.  @var{address} is the starting
 address, which will be rounded down to a page boundary.  @var{size} is
 the size in bytes of the region for which protection is to change, and
 will be rounded up to give a page boundary.  @var{access} specifies the
-types of accesses that must not cause page faults.
+types of accesses that must not cause page faults.  If the host port is
+not privileged, the amount of memory that can be wired is limited per task.
 
 The semantics of a successful @code{vm_wire} operation are that memory
 in the specified range will not cause page faults for any accesses
@@ -3257,7 +3258,7 @@ access argument of @code{VM_PROT_READ | VM_PROT_WRITE}.  A special case
 is that @code{VM_PROT_NONE} makes the memory pageable.
 
 The function returns @code{KERN_SUCCESS} if the call succeeded,
-@code{KERN_INVALID_HOST} if @var{host_priv} was not the privileged host
+@code{KERN_INVALID_HOST} if @var{host} was not a valid host
 port, @code{KERN_INVALID_TASK} if @var{task} was not a valid task,
 @code{KERN_INVALID_VALUE} if @var{access} specified an invalid access
 mode, @code{KERN_FAILURE} if some memory in the specified range is not
@@ -3265,7 +3266,7 @@ present or has an inappropriate protection value, and
 @code{KERN_INVALID_ARGUMENT} if unwiring (@var{access} is
 @code{VM_PROT_NONE}) and the memory is not already wired.
 
-The @code{vm_wire} call is actually an RPC to @var{host_priv}, normally
+The @code{vm_wire} call is actually an RPC to @var{host}, normally
 a send right for a privileged host port, but potentially any send right.
 In addition to the normal diagnostic return codes from the call's server
 (normally the kernel), the call may return @code{mach_msg} return codes.
diff --git a/include/mach/mach_host.defs b/include/mach/mach_host.defs
index 6699a50..28439a0 100644
--- a/include/mach/mach_host.defs
+++ b/include/mach/mach_host.defs
@@ -296,7 +296,7 @@ routine host_reboot(
  *     [ To unwire the pages, specify VM_PROT_NONE. ]
  */
 routine        vm_wire(
-               host_priv       : host_priv_t;
+               host            : mach_port_t;
                task            : vm_task_t;
                address         : vm_address_t;
                size            : vm_size_t;
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 6b13724..ae3ce21 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -208,6 +208,7 @@ void vm_map_setup(
        rbtree_init(&map->hdr.tree);
 
        map->size = 0;
+       map->user_wired = 0;
        map->ref_count = 1;
        map->pmap = pmap;
        map->min_offset = min;
@@ -1409,7 +1410,10 @@ kern_return_t vm_map_pageable_common(
 
                    if (user_wire) {
                        if (--(entry->user_wired_count) == 0)
+                       {
+                           map->user_wired -= entry->vme_end - entry->vme_start;
                            entry->wired_count--;
+                       }
                    }
                    else {
                        entry->wired_count--;
@@ -1486,7 +1490,10 @@ kern_return_t vm_map_pageable_common(
 
                    if (user_wire) {
                        if ((entry->user_wired_count)++ == 0)
+                       {
+                           map->user_wired += entry->vme_end - entry->vme_start;
                            entry->wired_count++;
+                       }
                    }
                    else {
                        entry->wired_count++;
@@ -1512,6 +1519,9 @@ kern_return_t vm_map_pageable_common(
                                (entry->vme_end > start)) {
                                    if (user_wire) {
                                        if (--(entry->user_wired_count) == 0)
+                                       {
+                                           map->user_wired -= entry->vme_end - entry->vme_start;
                                            entry->wired_count--;
+                                       }
                                    }
                                    else {
@@ -1627,6 +1635,8 @@ void vm_map_entry_delete(
            if (entry->wired_count != 0) {
                vm_fault_unwire(map, entry);
                entry->wired_count = 0;
+               if (entry->user_wired_count)
+                   map->user_wired -= entry->vme_end - entry->vme_start;
                entry->user_wired_count = 0;
            }
 
@@ -2274,6 +2284,8 @@ start_pass_1:
                        entry->offset = copy_entry->offset;
                        entry->needs_copy = copy_entry->needs_copy;
                        entry->wired_count = 0;
+                       if (entry->user_wired_count)
+                           dst_map->user_wired -= entry->vme_end - entry->vme_start;
                        entry->user_wired_count = 0;
 
                        vm_map_copy_entry_unlink(copy, copy_entry);
@@ -2869,6 +2881,7 @@ create_object:
 
        if (must_wire) {
                entry->wired_count = 1;
+               dst_map->user_wired += entry->vme_end - entry->vme_start;
                entry->user_wired_count = 1;
        } else {
                entry->wired_count = 0;
@@ -3954,6 +3967,8 @@ retry:
 
                                        assert(src_entry->wired_count > 0);
                                        src_entry->wired_count = 0;
+                                       if (src_entry->user_wired_count)
+                                           src_map->user_wired -= src_entry->vme_end - src_entry->vme_start;
                                        src_entry->user_wired_count = 0;
                                        unwire_end = src_entry->vme_end;
                                        pmap_pageable(vm_map_pmap(src_map),
diff --git a/vm/vm_map.h b/vm/vm_map.h
index fc7730a..9b31f90 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -170,6 +170,7 @@ struct vm_map {
 #define max_offset             hdr.links.end   /* end of range */
        pmap_t                  pmap;           /* Physical map */
        vm_size_t               size;           /* virtual size */
+       vm_size_t               user_wired;     /* size wired by the user */
        int                     ref_count;      /* Reference count */
        decl_simple_lock_data(, ref_lock)       /* Lock for ref_count field */
        vm_map_entry_t          hint;           /* hint for quick lookups */
diff --git a/vm/vm_user.c b/vm/vm_user.c
index f7c87cc..8c7a5d8 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -405,15 +405,32 @@ kern_return_t vm_map(
  *
  *     [ To unwire the pages, specify VM_PROT_NONE. ]
  */
-kern_return_t vm_wire(host, map, start, size, access)
-       const host_t            host;
+kern_return_t vm_wire(port, map, start, size, access)
+       const ipc_port_t        port;
        vm_map_t                map;
        vm_offset_t             start;
        vm_size_t               size;
        vm_prot_t               access;
 {
-       if (host == HOST_NULL)
+       host_t host;
+       boolean_t priv;
+
+       if (!IP_VALID(port))
+               return KERN_INVALID_HOST;
+
+       ip_lock(port);
+       if (!ip_active(port) ||
+                 (ip_kotype(port) != IKOT_HOST_PRIV
+               && ip_kotype(port) != IKOT_HOST))
+       {
+               ip_unlock(port);
                return KERN_INVALID_HOST;
+       }
+
+       priv = ip_kotype(port) == IKOT_HOST_PRIV;
+       ip_unlock(port);
+
+       host = (host_t) port->ip_kobject;
 
        if (map == VM_MAP_NULL)
                return KERN_INVALID_TASK;
@@ -426,6 +443,10 @@ kern_return_t vm_wire(host, map, start, size, access)
        if (projected_buffer_in_range(map, start, start+size))
                return(KERN_INVALID_ARGUMENT);
 
+       /* TODO: make it tunable */
+       if (!priv && access != VM_PROT_NONE && map->user_wired + size > 65536)
+               return KERN_NO_ACCESS;
+
        return vm_map_pageable_user(map,
                                    trunc_page(start),
                                    round_page(start+size),
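
For completeness, an unprivileged caller would exercise the new path along
these lines; this is an untested sketch, the vm_wire user stub and header
names are from memory, and the 64KiB check simply mirrors the hard-coded
limit above:

#include <stdio.h>
#include <mach.h>
#include <mach/mach_host.h>	/* user stub for vm_wire(), name from memory */

int
main(void)
{
	vm_address_t addr = 0;
	vm_size_t size = vm_page_size;	/* one page */
	kern_return_t kr;

	kr = vm_allocate(mach_task_self(), &addr, size, TRUE);
	if (kr != KERN_SUCCESS)
		return 1;

	/* With the patch, the unprivileged host port from mach_host_self()
	   is accepted, up to 64KiB of wired memory per task; going past
	   the limit returns KERN_NO_ACCESS.  */
	kr = vm_wire(mach_host_self(), mach_task_self(), addr, size,
		     VM_PROT_READ | VM_PROT_WRITE);
	if (kr != KERN_SUCCESS)
		fprintf(stderr, "vm_wire: %d\n", kr);

	/* VM_PROT_NONE unwires it again.  */
	vm_wire(mach_host_self(), mach_task_self(), addr, size, VM_PROT_NONE);

	vm_deallocate(mach_task_self(), addr, size);
	return kr == KERN_SUCCESS ? 0 : 1;
}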
