> On Sun, Mar 23, 2014 at 20:10, Miod Vallat wrote:
>
> > Except that 1. this is ugly unless you express the values in hex, as
> > no one can spot these are multiples of 4096 in decimal, and 2. I think it
> > is more important to make clear that nback == nforw - 1 (or + 1 in the
> > SEQ case), regardless of the page size (unless page sizes are so huge
> > both values are zero).
>
> Oh, I reached the opposite conclusion. :) Regardless of page size, the
> amount (in bytes) to be read should be the same. The tight coupling is
> not a feature. Your diff actually makes future tuning more difficult,
> imo, but given that nobody has appeared to care and it makes your
> computer better, I won't object too strenuously.
What about this diff, then? It's close to yours but using shifts instead
of integer divide (as PAGE_SIZE may not be a constant on all platforms).
Index: uvm_fault.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
retrieving revision 1.70
diff -u -p -r1.70 uvm_fault.c
--- uvm_fault.c 31 Mar 2014 20:16:39 -0000 1.70
+++ uvm_fault.c 1 Apr 2014 17:31:03 -0000
@@ -152,22 +152,15 @@
*/
struct uvm_advice {
- int advice;
int nback;
int nforw;
};
/*
- * page range array:
- * note: index in array must match "advice" value
- * XXX: borrowed numbers from freebsd. do they work well for us?
+ * page range array: set up in uvmfault_init().
*/
-static struct uvm_advice uvmadvice[] = {
- { MADV_NORMAL, 3, 4 },
- { MADV_RANDOM, 0, 0 },
- { MADV_SEQUENTIAL, 8, 7},
-};
+static struct uvm_advice uvmadvice[UVM_ADV_MASK + 1];
#define UVM_MAXRANGE 16 /* must be max() of nback+nforw+1 */
@@ -220,6 +213,28 @@ uvmfault_anonflush(struct vm_anon **anon
*/
/*
+ * uvmfault_init: compute proper values for the uvmadvice[] array.
+ */
+
+void
+uvmfault_init()
+{
+ int npages;
+
+ npages = 16384 >> uvmexp.pageshift;
+ KASSERT(npages <= UVM_MAXRANGE / 2);
+
+ uvmadvice[UVM_ADV_NORMAL].nforw = npages;
+ uvmadvice[UVM_ADV_NORMAL].nback = npages - 1;
+
+ npages = 32768 >> uvmexp.pageshift;
+ KASSERT(npages <= UVM_MAXRANGE / 2);
+
+ uvmadvice[UVM_ADV_SEQUENTIAL].nforw = npages - 1;
+ uvmadvice[UVM_ADV_SEQUENTIAL].nback = npages;
+}
+
+/*
* uvmfault_amapcopy: clear "needs_copy" in a map.
*
* => if we are out of RAM we sleep (waiting for more)
@@ -687,8 +702,6 @@ ReFault:
if (narrow == FALSE) {
/* wide fault (!narrow) */
- KASSERT(uvmadvice[ufi.entry->advice].advice ==
- ufi.entry->advice);
nback = min(uvmadvice[ufi.entry->advice].nback,
(ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
startva = ufi.orig_rvaddr - (nback << PAGE_SHIFT);
Index: uvm_fault.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_fault.h,v
retrieving revision 1.13
diff -u -p -r1.13 uvm_fault.h
--- uvm_fault.h 25 Mar 2009 20:00:18 -0000 1.13
+++ uvm_fault.h 1 Apr 2014 17:31:03 -0000
@@ -72,6 +72,7 @@ struct uvm_faultinfo {
* fault prototypes
*/
+void uvmfault_init(void);
boolean_t uvmfault_lookup(struct uvm_faultinfo *, boolean_t);
boolean_t uvmfault_relock(struct uvm_faultinfo *);
Index: uvm_init.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_init.c,v
retrieving revision 1.30
diff -u -p -r1.30 uvm_init.c
--- uvm_init.c 15 Mar 2012 17:52:28 -0000 1.30
+++ uvm_init.c 1 Apr 2014 17:31:03 -0000
@@ -114,6 +114,12 @@ uvm_init(void)
uvm_km_init(kvm_start, kvm_end);
/*
+ * step 4.5: init (tune) the fault recovery code.
+ */
+
+ uvmfault_init();
+
+ /*
* step 5: init the pmap module. the pmap module is free to allocate
* memory for its private use (e.g. pvlists).
*/