On Thu, 22 Mar 2018, Cy Schubert wrote:
It broke i386 too.
I believe I'm not able to reproduce this because it depends on the kernel
configuration, so I can't verify the fix myself.
Since you can, would you mind committing? I see no problem with the diff
below.
Thanks,
Jeff
Index: sys/vm/vm_reserv.c
===================================================================
--- sys/vm/vm_reserv.c (revision 331399)
+++ sys/vm/vm_reserv.c (working copy)
@@ -45,8 +45,6 @@
#include <sys/param.h>
#include <sys/kernel.h>
-#include <sys/counter.h>
-#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
@@ -55,6 +53,8 @@
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
+#include <sys/counter.h>
+#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>
This is because sys/i386/include/counter.h uses critical_enter() and
critical_exit(), which are declared in sys/systm.h.
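Roughly, the dependency looks like this (an illustrative sketch of the i386
case, not the verbatim header; example_pcpu_add() is a made-up name):

/* <sys/systm.h> must come first: it declares critical_enter()/exit(). */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>        /* on i386 this expands inlines that use them */

/* Illustrative only: a 64-bit per-CPU update guarded by a critical section. */
static inline void
example_pcpu_add(uint64_t *slot, uint64_t inc)
{
        critical_enter();       /* prevent preemption during the 64-bit update */
        *slot += inc;
        critical_exit();
}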
It built nicely on my amd64's though.
~cy
In message <alpine.BSF.2.21.1803221451420.2307@desktop>, Jeff Roberson
writes:
Thank you, working on it. I had done a make universe before getting
review feedback.
Jeff
On Thu, 22 Mar 2018, Justin Hibbits wrote:
This broke gcc builds.
On Thu, Mar 22, 2018 at 2:21 PM, Jeff Roberson <j...@freebsd.org> wrote:
Author: jeff
Date: Thu Mar 22 19:21:11 2018
New Revision: 331369
URL: https://svnweb.freebsd.org/changeset/base/331369
Log:
Lock reservations with a dedicated lock in each reservation. Protect the
vmd_free_count with atomics.
This allows us to allocate and free from reservations without the free lock
except where a superpage is allocated from the physical layer, which is
roughly 1/512 of the operations on amd64.
Use the counter API to eliminate cache contention on counters.
Reviewed by: markj
Tested by: pho
Sponsored by: Netflix, Dell/EMC Isilon
Differential Revision: https://reviews.freebsd.org/D14707
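For context, the counter(9) API mentioned in the log is used roughly as
below; this is a minimal sketch with a hypothetical counter name, not code
from this commit:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/malloc.h>

static counter_u64_t example_count;     /* hypothetical per-CPU statistic */

static void
example_init(void)
{
        /* Allocate per-CPU storage so updates never bounce a shared line. */
        example_count = counter_u64_alloc(M_WAITOK);
}

static void
example_event(void)
{
        counter_u64_add(example_count, 1);      /* cheap per-CPU increment */
}

static uint64_t
example_read(void)
{
        /* Readers pay the cost of summing the per-CPU values. */
        return (counter_u64_fetch(example_count));
}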
Modified:
head/sys/vm/vm_page.c
head/sys/vm/vm_pagequeue.h
head/sys/vm/vm_reserv.c
head/sys/vm/vm_reserv.h
Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c Thu Mar 22 19:11:43 2018 (r331368)
+++ head/sys/vm/vm_page.c Thu Mar 22 19:21:11 2018 (r331369)
@@ -177,7 +177,6 @@ static uma_zone_t fakepg_zone;
static void vm_page_alloc_check(vm_page_t m);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(uint8_t queue, vm_page_t m);
-static void vm_page_free_phys(struct vm_domain *vmd, vm_page_t m);
static void vm_page_init(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
vm_pindex_t pindex, vm_page_t mpred);
@@ -1677,10 +1676,10 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pi
* for the request class and false otherwise.
*/
int
-vm_domain_available(struct vm_domain *vmd, int req, int npages)
+vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
{
+ u_int limit, old, new;
- vm_domain_free_assert_locked(vmd);
req = req & VM_ALLOC_CLASS_MASK;
/*
@@ -1688,15 +1687,34 @@ vm_domain_available(struct vm_domain *vmd, int req, in
*/
if (curproc == pageproc && req != VM_ALLOC_INTERRUPT)
req = VM_ALLOC_SYSTEM;
+ if (req == VM_ALLOC_INTERRUPT)
+ limit = 0;
+ else if (req == VM_ALLOC_SYSTEM)
+ limit = vmd->vmd_interrupt_free_min;
+ else
+ limit = vmd->vmd_free_reserved;
- if (vmd->vmd_free_count >= npages + vmd->vmd_free_reserved ||
- (req == VM_ALLOC_SYSTEM &&
- vmd->vmd_free_count >= npages + vmd->vmd_interrupt_free_min) ||
- (req == VM_ALLOC_INTERRUPT &&
- vmd->vmd_free_count >= npages))
- return (1);
+ /*
+ * Attempt to reserve the pages. Fail if we're below the limit.
+ */
+ limit += npages;
+ old = vmd->vmd_free_count;
+ do {
+ if (old < limit)
+ return (0);
+ new = old - npages;
+ } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);
- return (0);
+ /* Wake the page daemon if we've crossed the threshold. */
+ if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
+ pagedaemon_wakeup(vmd->vmd_domain);
+
+ /* Only update bitsets on transitions. */
+ if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
+ (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
+ vm_domain_set(vmd);
+
+ return (1);
}
vm_page_t
@@ -1723,44 +1741,34 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pind
again:
m = NULL;
#if VM_NRESERVLEVEL > 0
+ /*
+ * Can we allocate the page from a reservation?
+ */
if (vm_object_reserv(object) &&
- (m = vm_reserv_extend(req, object, pindex, domain, mpred))
- != NULL) {
+ ((m = vm_reserv_extend(req, object, pindex, domain, mpred)) != NULL ||
+ (m = vm_reserv_alloc_page(req, object, pindex, domain, mpred)) != NULL)) {
domain = vm_phys_domain(m);
vmd = VM_DOMAIN(domain);
goto found;
}
#endif
vmd = VM_DOMAIN(domain);
- vm_domain_free_lock(vmd);
- if (vm_domain_available(vmd, req, 1)) {
+ if (vm_domain_allocate(vmd, req, 1)) {
/*
- * Can we allocate the page from a reservation?
+ * If not, allocate it from the free page queues.
*/
+ vm_domain_free_lock(vmd);
+ m = vm_phys_alloc_pages(domain, object != NULL ?
+ VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
+ vm_domain_free_unlock(vmd);
+ if (m == NULL) {
+ vm_domain_freecnt_inc(vmd, 1);
#if VM_NRESERVLEVEL > 0
- if (!vm_object_reserv(object) ||
- (m = vm_reserv_alloc_page(object, pindex,
- domain, mpred)) == NULL)
+ if (vm_reserv_reclaim_inactive(domain))
+ goto again;
#endif
- {
- /*
- * If not, allocate it from the free page queues.
- */
- m = vm_phys_alloc_pages(domain, object != NULL ?
- VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
-#if VM_NRESERVLEVEL > 0
- if (m == NULL && vm_reserv_reclaim_inactive(domain)) {
- m = vm_phys_alloc_pages(domain,
- object != NULL ?
- VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
- 0);
- }
-#endif
}
}
- if (m != NULL)
- vm_domain_freecnt_dec(vmd, 1);
- vm_domain_free_unlock(vmd);
if (m == NULL) {
/*
* Not allocatable, give up.
@@ -1775,9 +1783,7 @@ again:
*/
KASSERT(m != NULL, ("missing page"));
-#if VM_NRESERVLEVEL > 0
found:
-#endif
'found' is now declared, but unused on powerpc64.
- Justin
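One minimal way to quiet that warning, assuming the only goto to the label is
compiled under VM_NRESERVLEVEL > 0, would be to keep the label under the same
conditional (a sketch, not necessarily the fix that was committed):

#if VM_NRESERVLEVEL > 0
found:          /* only reachable via goto when reservations are compiled in */
#endif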
--
Cheers,
Cy Schubert <cy.schub...@cschubert.com>
FreeBSD UNIX: <c...@freebsd.org> Web: http://www.FreeBSD.org
The need of the many outweighs the greed of the few.
_______________________________________________
svn-src-head@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscr...@freebsd.org"