Author: kib
Date: Wed Oct  8 16:48:03 2014
New Revision: 272761
URL: https://svnweb.freebsd.org/changeset/base/272761

Log:
  Add an argument to the x86 pmap_invalidate_cache_range() to request
  forced invalidation of the cache range regardless of the presence of
  the self-snoop feature.  Some recent Intel GPUs are not coherent in
  some modes, and dirty lines in the CPU cache must be flushed before
  the pages are transferred to the GPU domain.
  
  Reviewed by:  alc (previous version)
  Tested by:    pho (amd64)
  Sponsored by: The FreeBSD Foundation
  MFC after:    1 week
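
  For illustration, a caller handing a CPU-dirtied kernel page to a
  non-coherent GPU could now pass TRUE as the new "force" argument to
  bypass the self-snoop shortcut.  This is a minimal sketch: the helper
  gpu_publish_page() is invented here, and only the changed
  pmap_invalidate_cache_range() signature comes from this commit.

	/*
	 * Hypothetical helper: flush a CPU-dirtied kernel page before a
	 * non-coherent GPU reads it.  Passing TRUE for "force" makes
	 * pmap_invalidate_cache_range() issue CLFLUSH (or a full cache
	 * invalidation) even when CPUID reports Self Snoop.
	 */
	static void
	gpu_publish_page(vm_offset_t kva)
	{

		pmap_invalidate_cache_range(kva, kva + PAGE_SIZE, TRUE);
	}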

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/amd64/include/pmap.h
  head/sys/dev/drm2/i915/intel_ringbuffer.c
  head/sys/i386/i386/pmap.c
  head/sys/i386/i386/vm_machdep.c
  head/sys/i386/include/pmap.h

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Wed Oct  8 16:35:57 2014        (r272760)
+++ head/sys/amd64/amd64/pmap.c Wed Oct  8 16:48:03 2014        (r272761)
@@ -1710,16 +1710,20 @@ pmap_update_pde(pmap_t pmap, vm_offset_t
 #define PMAP_CLFLUSH_THRESHOLD   (2 * 1024 * 1024)
 
 void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
 {
 
-       KASSERT((sva & PAGE_MASK) == 0,
-           ("pmap_invalidate_cache_range: sva not page-aligned"));
-       KASSERT((eva & PAGE_MASK) == 0,
-           ("pmap_invalidate_cache_range: eva not page-aligned"));
+       if (force) {
+               sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
+       } else {
+               KASSERT((sva & PAGE_MASK) == 0,
+                   ("pmap_invalidate_cache_range: sva not page-aligned"));
+               KASSERT((eva & PAGE_MASK) == 0,
+                   ("pmap_invalidate_cache_range: eva not page-aligned"));
+       }
 
-       if (cpu_feature & CPUID_SS)
-               ; /* If "Self Snoop" is supported, do nothing. */
+       if ((cpu_feature & CPUID_SS) != 0 && !force)
+               ; /* If "Self Snoop" is supported and allowed, do nothing. */
        else if ((cpu_feature & CPUID_CLFSH) != 0 &&
            eva - sva < PMAP_CLFLUSH_THRESHOLD) {
 
@@ -6222,7 +6226,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_
        for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
                pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
        pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-       pmap_invalidate_cache_range(va, va + tmpsize);
+       pmap_invalidate_cache_range(va, va + tmpsize, FALSE);
        return ((void *)(va + offset));
 }
 
@@ -6558,7 +6562,7 @@ pmap_change_attr_locked(vm_offset_t va, 
         */
        if (changed) {
                pmap_invalidate_range(kernel_pmap, base, tmpva);
-               pmap_invalidate_cache_range(base, tmpva);
+               pmap_invalidate_cache_range(base, tmpva, FALSE);
        }
        return (error);
 }
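
  In the force path, sva is rounded down to a cache-line boundary so the
  first CLFLUSH also covers a partially included leading line.  With
  cpu_clflush_line_size == 64 the mask works out as below (addresses
  invented for illustration); masking with the line size itself would
  merely clear bit 6 and leave the address unaligned:

	0x1274 & ~(vm_offset_t)(64 - 1) == 0x1240	/* 64-byte aligned */
	0x1274 & ~(vm_offset_t)64       == 0x1234	/* only bit 6 cleared */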

Modified: head/sys/amd64/include/pmap.h
==============================================================================
--- head/sys/amd64/include/pmap.h       Wed Oct  8 16:35:57 2014        (r272760)
+++ head/sys/amd64/include/pmap.h       Wed Oct  8 16:48:03 2014        (r272761)
@@ -394,7 +394,8 @@ void        pmap_invalidate_range(pmap_t, vm_of
 void   pmap_invalidate_all(pmap_t);
 void   pmap_invalidate_cache(void);
 void   pmap_invalidate_cache_pages(vm_page_t *pages, int count);
-void   pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
+void   pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
+           boolean_t force);
 void   pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
 #endif /* _KERNEL */
 

Modified: head/sys/dev/drm2/i915/intel_ringbuffer.c
==============================================================================
--- head/sys/dev/drm2/i915/intel_ringbuffer.c   Wed Oct  8 16:35:57 2014        (r272760)
+++ head/sys/dev/drm2/i915/intel_ringbuffer.c   Wed Oct  8 16:48:03 2014        (r272761)
@@ -366,7 +366,7 @@ init_pipe_control(struct intel_ring_buff
                goto err_unpin;
        pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
        pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
-           (vm_offset_t)pc->cpu_page + PAGE_SIZE);
+           (vm_offset_t)pc->cpu_page + PAGE_SIZE, FALSE);
 
        pc->obj = obj;
        ring->private = pc;
@@ -1014,7 +1014,7 @@ static int init_status_page(struct intel
        pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
            1);
        pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
-           (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
+           (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE, FALSE);
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c   Wed Oct  8 16:35:57 2014        (r272760)
+++ head/sys/i386/i386/pmap.c   Wed Oct  8 16:48:03 2014        (r272761)
@@ -1172,16 +1172,20 @@ pmap_update_pde(pmap_t pmap, vm_offset_t
 #define        PMAP_CLFLUSH_THRESHOLD  (2 * 1024 * 1024)
 
 void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
 {
 
-       KASSERT((sva & PAGE_MASK) == 0,
-           ("pmap_invalidate_cache_range: sva not page-aligned"));
-       KASSERT((eva & PAGE_MASK) == 0,
-           ("pmap_invalidate_cache_range: eva not page-aligned"));
+       if (force) {
+               sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
+       } else {
+               KASSERT((sva & PAGE_MASK) == 0,
+                   ("pmap_invalidate_cache_range: sva not page-aligned"));
+               KASSERT((eva & PAGE_MASK) == 0,
+                   ("pmap_invalidate_cache_range: eva not page-aligned"));
+       }
 
-       if (cpu_feature & CPUID_SS)
-               ; /* If "Self Snoop" is supported, do nothing. */
+       if ((cpu_feature & CPUID_SS) != 0 && !force)
+               ; /* If "Self Snoop" is supported and allowed, do nothing. */
        else if ((cpu_feature & CPUID_CLFSH) != 0 &&
            eva - sva < PMAP_CLFLUSH_THRESHOLD) {
 
@@ -5164,7 +5168,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_
        for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
                pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
        pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-       pmap_invalidate_cache_range(va, va + size);
+       pmap_invalidate_cache_range(va, va + size, FALSE);
        return ((void *)(va + offset));
 }
 
@@ -5370,7 +5374,7 @@ pmap_change_attr(vm_offset_t va, vm_size
         */
        if (changed) {
                pmap_invalidate_range(kernel_pmap, base, tmpva);
-               pmap_invalidate_cache_range(base, tmpva);
+               pmap_invalidate_cache_range(base, tmpva, FALSE);
        }
        return (0);
 }

Modified: head/sys/i386/i386/vm_machdep.c
==============================================================================
--- head/sys/i386/i386/vm_machdep.c     Wed Oct  8 16:35:57 2014        (r272760)
+++ head/sys/i386/i386/vm_machdep.c     Wed Oct  8 16:48:03 2014        (r272761)
@@ -813,7 +813,7 @@ sf_buf_invalidate(struct sf_buf *sf)
         * settings are recalculated.
         */
        pmap_qenter(sf->kva, &m, 1);
-       pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE);
+       pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE, FALSE);
 }
 
 /*

Modified: head/sys/i386/include/pmap.h
==============================================================================
--- head/sys/i386/include/pmap.h        Wed Oct  8 16:35:57 2014        (r272760)
+++ head/sys/i386/include/pmap.h        Wed Oct  8 16:48:03 2014        (r272761)
@@ -458,7 +458,8 @@ void        pmap_invalidate_range(pmap_t, vm_of
 void   pmap_invalidate_all(pmap_t);
 void   pmap_invalidate_cache(void);
 void   pmap_invalidate_cache_pages(vm_page_t *pages, int count);
-void   pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
+void   pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
+           boolean_t force);
 
 #endif /* _KERNEL */
 