On Thu, Dec 05, 2019 at 07:25:51PM +0100, Martin Pieuchot wrote:
> Following cleanup diff:
> 
> - reduces gratuitous differences with NetBSD,
> - merges multiple '#ifdef _KERNEL' blocks,
> - kills unused 'struct vm_map_intrsafe'
> - turns 'union vm_map_object' into an anonymous union (following NetBSD)
> - moves questionable vm_map_modflags() into uvm/uvm_map.c
> - removes guards around MAX_KMAPENT, it is defined & used only once
> - documents lock differences
> - fixes tab vs space
> 
> Ok?
> 

ok mlarkin

> Index: uvm/uvm_extern.h
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_extern.h,v
> retrieving revision 1.151
> diff -u -p -r1.151 uvm_extern.h
> --- uvm/uvm_extern.h  29 Nov 2019 06:34:45 -0000      1.151
> +++ uvm/uvm_extern.h  5 Dec 2019 16:06:33 -0000
> @@ -65,9 +65,6 @@ typedef int vm_fault_t;
>  typedef int vm_inherit_t;    /* XXX: inheritance codes */
>  typedef off_t voff_t;                /* XXX: offset within a uvm_object */
>  
> -union vm_map_object;
> -typedef union vm_map_object vm_map_object_t;
> -
>  struct vm_map_entry;
>  typedef struct vm_map_entry *vm_map_entry_t;
>  
> Index: uvm/uvm_map.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_map.c,v
> retrieving revision 1.256
> diff -u -p -r1.256 uvm_map.c
> --- uvm/uvm_map.c     4 Dec 2019 08:28:29 -0000       1.256
> +++ uvm/uvm_map.c     5 Dec 2019 16:27:22 -0000
> @@ -230,7 +230,6 @@ void                       vmspace_validate(struct vm_map*)
>  #define PMAP_PREFER(addr, off)       (addr)
>  #endif
>  
> -
>  /*
>   * The kernel map will initially be VM_MAP_KSIZE_INIT bytes.
>  * Every time that gets cramped, we grow by at least VM_MAP_KSIZE_DELTA bytes.
> @@ -334,6 +333,14 @@ vaddr_t uvm_maxkaddr;
>                               MUTEX_ASSERT_LOCKED(&(_map)->mtx);      \
>               }                                                       \
>       } while (0)
> +
> +#define      vm_map_modflags(map, set, clear)                                \
> +     do {                                                            \
> +             mtx_enter(&(map)->flags_lock);                          \
> +             (map)->flags = ((map)->flags | (set)) & ~(clear);       \
> +             mtx_leave(&(map)->flags_lock);                          \
> +     } while (0)
> +
>  
>  /*
>   * Tree describing entries by address.
> Index: uvm/uvm_map.h
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_map.h,v
> retrieving revision 1.65
> diff -u -p -r1.65 uvm_map.h
> --- uvm/uvm_map.h     29 Nov 2019 06:34:46 -0000      1.65
> +++ uvm/uvm_map.h     5 Dec 2019 16:26:09 -0000
> @@ -86,16 +86,6 @@
>  #ifdef _KERNEL
>  
>  /*
> - * Internal functions.
> - *
> - * Required by clipping macros.
> - */
> -void                  uvm_map_clip_end(struct vm_map*, struct vm_map_entry*,
> -                         vaddr_t);
> -void                  uvm_map_clip_start(struct vm_map*,
> -                         struct vm_map_entry*, vaddr_t);
> -
> -/*
>   * UVM_MAP_CLIP_START: ensure that the entry begins at or after
>   * the starting address, if it doesn't we split the entry.
>   * 
> @@ -133,26 +123,6 @@ void                      uvm_map_clip_start(struct vm_map
>  #include <uvm/uvm_anon.h>
>  
>  /*
> - * types defined:
> - *
> - *   vm_map_t                the high-level address map data structure.
> - *   vm_map_entry_t          an entry in an address map.
> - *   vm_map_version_t        a timestamp of a map, for use with vm_map_lookup
> - */
> -
> -/*
> - * Objects which live in maps may be either VM objects, or another map
> - * (called a "sharing map") which denotes read-write sharing with other maps.
> - *
> - * XXXCDC: private pager data goes here now
> - */
> -
> -union vm_map_object {
> -     struct uvm_object       *uvm_obj;       /* UVM OBJECT */
> -     struct vm_map           *sub_map;       /* belongs to another map */
> -};
> -
> -/*
>   * Address map entries consist of start and end addresses,
>   * a VM object (or sharing map) and offset into that object,
>   * and user-exported inheritance and protection information.
> @@ -177,23 +147,23 @@ struct vm_map_entry {
>       vsize_t                 guard;          /* bytes in guard */
>       vsize_t                 fspace;         /* free space */
>  
> -     union vm_map_object     object;         /* object I point to */
> +     union {
> +             struct uvm_object *uvm_obj;     /* uvm object */
> +             struct vm_map   *sub_map;       /* belongs to another map */
> +     } object;                               /* object I point to */
>       voff_t                  offset;         /* offset into object */
>       struct vm_aref          aref;           /* anonymous overlay */
> -
>       int                     etype;          /* entry type */
> -
>       vm_prot_t               protection;     /* protection code */
>       vm_prot_t               max_protection; /* maximum protection */
>       vm_inherit_t            inheritance;    /* inheritance */
> -
>       int                     wired_count;    /* can be paged if == 0 */
>       int                     advice;         /* madvise advice */
>  #define uvm_map_entry_stop_copy flags
>       u_int8_t                flags;          /* flags */
>  
> -#define UVM_MAP_STATIC               0x01            /* static map entry */
> -#define UVM_MAP_KMEM         0x02            /* from kmem entry pool */
> +#define      UVM_MAP_STATIC          0x01            /* static map entry */
> +#define      UVM_MAP_KMEM            0x02            /* from kmem entry pool */
>  
>       vsize_t                 fspace_augment; /* max(fspace) in subtree */
>  };
> @@ -278,7 +248,7 @@ RBT_PROTOTYPE(uvm_map_addr, vm_map_entry
>   * If that allocation fails:
>   * - vmspace maps will spill over into vm_map.bfree,
>   * - all other maps will call uvm_map_kmem_grow() to increase the arena.
> - * 
> + *
>   * vmspace maps have their data, brk() and stack arenas automatically
>   * updated when uvm_map() is invoked without MAP_FIXED.
>   * The spill over arena (vm_map.bfree) will contain the space in the brk()
> @@ -294,8 +264,8 @@ RBT_PROTOTYPE(uvm_map_addr, vm_map_entry
>   */
>  struct vm_map {
>       struct pmap             *pmap;          /* [I] Physical map */
> -     struct rwlock           lock;           /* Lock for map data */
> -     struct mutex            mtx;
> +     struct rwlock           lock;           /* Non-intrsafe lock */
> +     struct mutex            mtx;            /* Intrsafe lock */
>       u_long                  sserial;        /* [v] # stack changes */
>       u_long                  wserial;        /* [v] # PROT_WRITE increases */
>  
> @@ -348,75 +318,58 @@ struct vm_map {
>  #define      VM_MAP_WIREFUTURE       0x04            /* rw: wire future mappings */
>  #define      VM_MAP_BUSY             0x08            /* rw: map is busy */
>  #define      VM_MAP_WANTLOCK         0x10            /* rw: want to write-lock */
> -#define VM_MAP_GUARDPAGES    0x20            /* rw: add guard pgs to map */
> -#define VM_MAP_ISVMSPACE     0x40            /* ro: map is a vmspace */
> -#define VM_MAP_SYSCALL_ONCE  0x80            /* rw: libc syscall registered */
> +#define      VM_MAP_GUARDPAGES       0x20            /* rw: add guard pgs to map */
> +#define      VM_MAP_ISVMSPACE        0x40            /* ro: map is a vmspace */
> +#define      VM_MAP_SYSCALL_ONCE     0x80            /* rw: libc syscall registered */
>  
> -/* XXX: number of kernel maps and entries to statically allocate */
> -
> -#if !defined(MAX_KMAPENT)
> +/* Number of kernel maps and entries to statically allocate */
>  #define      MAX_KMAPENT     1024    /* Sufficient to make it to the scheduler. */
> -#endif       /* !defined MAX_KMAPENT */
>  
>  #ifdef _KERNEL
> -#define      vm_map_modflags(map, set, clear)                                \
> -do {                                                                 \
> -     mtx_enter(&(map)->flags_lock);                                  \
> -     (map)->flags = ((map)->flags | (set)) & ~(clear);               \
> -     mtx_leave(&(map)->flags_lock);                                  \
> -} while (0)
> -#endif /* _KERNEL */
> -
> -/*
> - *   Interrupt-safe maps must also be kept on a special list,
> - *   to assist uvm_fault() in avoiding locking problems.
> - */
> -struct vm_map_intrsafe {
> -     struct vm_map   vmi_map;
> -     LIST_ENTRY(vm_map_intrsafe) vmi_list;
> -};
> -
>  /*
>   * globals:
>   */
>  
> -#ifdef _KERNEL
> -
>  extern vaddr_t       uvm_maxkaddr;
>  
>  /*
>   * protos: the following prototypes define the interface to vm_map
>   */
>  
> -void         uvm_map_deallocate(vm_map_t);
> +void         uvm_map_deallocate(struct vm_map *);
>  
> -int          uvm_map_clean(vm_map_t, vaddr_t, vaddr_t, int);
> -vm_map_t     uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
> -int          uvm_map_extract(struct vm_map*, vaddr_t, vsize_t, vaddr_t*,
> -                 int);
> +int          uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
> +void         uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
> +                 vaddr_t);
> +void         uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
> +                 vaddr_t);
> +int          uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
> +                 vaddr_t *, int);
> +struct vm_map *      uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
>  vaddr_t              uvm_map_pie(vaddr_t);
>  vaddr_t              uvm_map_hint(struct vmspace *, vm_prot_t, vaddr_t, vaddr_t);
> -int          uvm_map_syscall(vm_map_t, vaddr_t, vaddr_t);
> -int          uvm_map_inherit(vm_map_t, vaddr_t, vaddr_t, vm_inherit_t);
> -int          uvm_map_advice(vm_map_t, vaddr_t, vaddr_t, int);
> +int          uvm_map_syscall(struct vm_map *, vaddr_t, vaddr_t);
> +int          uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t, vm_inherit_t);
> +int          uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
>  void         uvm_map_init(void);
> -boolean_t    uvm_map_lookup_entry(vm_map_t, vaddr_t, vm_map_entry_t *);
> -boolean_t    uvm_map_is_stack_remappable(vm_map_t, vaddr_t, vsize_t);
> +boolean_t    uvm_map_lookup_entry(struct vm_map *, vaddr_t, vm_map_entry_t *);
> +boolean_t    uvm_map_is_stack_remappable(struct vm_map *, vaddr_t, vsize_t);
>  int          uvm_map_remap_as_stack(struct proc *, vaddr_t, vsize_t);
> -int          uvm_map_replace(vm_map_t, vaddr_t, vaddr_t,
> +int          uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
>                   vm_map_entry_t, int);
> -int          uvm_map_reserve(vm_map_t, vsize_t, vaddr_t, vsize_t,
> +int          uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
>                   vaddr_t *);
> -void         uvm_map_setup(vm_map_t, vaddr_t, vaddr_t, int);
> -int          uvm_map_submap(vm_map_t, vaddr_t, vaddr_t, vm_map_t);
> -void         uvm_unmap(vm_map_t, vaddr_t, vaddr_t);
> +void         uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
> +int          uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
> +                 struct vm_map *);
> +void         uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
> +void         uvm_unmap_detach(struct uvm_map_deadq *, int);
> +void         uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
> +                 struct uvm_map_deadq *, boolean_t, boolean_t);
>  void         uvm_map_set_uaddr(struct vm_map*, struct uvm_addr_state**,
>                   struct uvm_addr_state*);
>  int          uvm_map_mquery(struct vm_map*, vaddr_t*, vsize_t, voff_t, int);
>  
> -void         uvm_unmap_detach(struct uvm_map_deadq*, int);
> -void         uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
> -                 struct uvm_map_deadq*, boolean_t, boolean_t);
>  
>  struct p_inentry;
>  
> @@ -430,8 +383,6 @@ struct kinfo_vmentry;
>  int          uvm_map_fill_vmmap(struct vm_map *, struct kinfo_vmentry *,
>                   size_t *);
>  
> -#endif /* _KERNEL */
> -
>  /*
>   * VM map locking operations:
>   *
> @@ -458,7 +409,6 @@ int               uvm_map_fill_vmmap(struct vm_map *,
>   *
>   */
>  
> -#ifdef _KERNEL
>  /*
>   * XXX: clean up later
>   * Half the kernel seems to depend on them being included here.
> 

Reply via email to