On Wed, 19 Sep 2018 19:35:02 +0000 (UTC) Konstantin Belousov <k...@freebsd.org> wrote:
> Author: kib
> Date: Wed Sep 19 19:35:02 2018
> New Revision: 338807
> URL: https://svnweb.freebsd.org/changeset/base/338807
> 
> Log:
>   Convert x86 cache invalidation functions to ifuncs.
>   
>   This simplifies the runtime logic and reduces the number of
>   runtime-constant branches.
>   
>   Reviewed by:        alc, markj
>   Sponsored by:       The FreeBSD Foundation
>   Approved by:        re (gjb)
>   Differential revision:      https://reviews.freebsd.org/D16736
> 
> Modified:
>   head/sys/amd64/amd64/pmap.c
>   head/sys/amd64/include/pmap.h
>   head/sys/dev/drm2/drm_os_freebsd.c
>   head/sys/dev/drm2/i915/intel_ringbuffer.c
>   head/sys/i386/i386/pmap.c
>   head/sys/i386/i386/vm_machdep.c
>   head/sys/i386/include/pmap.h
>   head/sys/x86/iommu/intel_utils.c
> 
> Modified: head/sys/i386/i386/pmap.c
> ==============================================================================
> --- head/sys/i386/i386/pmap.c Wed Sep 19 19:13:43 2018        (r338806)
> +++ head/sys/i386/i386/pmap.c Wed Sep 19 19:35:02 2018        (r338807)
> @@ -148,6 +148,7 @@ __FBSDID("$FreeBSD$");
>  #include <machine/intr_machdep.h>
>  #include <x86/apicvar.h>
>  #endif
> +#include <x86/ifunc.h>
>  #include <machine/bootinfo.h>
>  #include <machine/cpu.h>
>  #include <machine/cputypes.h>
> @@ -314,6 +315,10 @@ static vm_page_t pmap_enter_quick_locked(pmap_t pmap, 
>      vm_page_t m, vm_prot_t prot, vm_page_t mpte);
>  static void pmap_flush_page(vm_page_t m);
>  static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
> +static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
> +    vm_offset_t eva);
> +static void pmap_invalidate_cache_range_all(vm_offset_t sva,
> +    vm_offset_t eva);
>  static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
>                   pd_entry_t pde);
>  static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
> @@ -1407,37 +1412,62 @@ pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, 
>               pmap_invalidate_page(pmap, va);
>  }
>  
> +DEFINE_IFUNC(, void, pmap_invalidate_cache_range, (vm_offset_t, vm_offset_t),
> +    static)
> +{
> +
> +     if ((cpu_feature & CPUID_SS) != 0)
> +             return (pmap_invalidate_cache_range_selfsnoop);
> +     if ((cpu_feature & CPUID_CLFSH) != 0)
> +             return (pmap_force_invalidate_cache_range);
> +     return (pmap_invalidate_cache_range_all);
> +}
> +
>  #define      PMAP_CLFLUSH_THRESHOLD  (2 * 1024 * 1024)
>  
> +static void
> +pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
> +{
> +
> +     KASSERT((sva & PAGE_MASK) == 0,
> +         ("pmap_invalidate_cache_range: sva not page-aligned"));
> +     KASSERT((eva & PAGE_MASK) == 0,
> +         ("pmap_invalidate_cache_range: eva not page-aligned"));
> +}
> +
> +static void
> +pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
> +{
> +
> +     pmap_invalidate_cache_range_check_align(sva, eva);
> +}
> +
>  void
> -pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
> +pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
>  {
>  
> -     if (force) {
> -             sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
> -     } else {
> -             KASSERT((sva & PAGE_MASK) == 0,
> -                 ("pmap_invalidate_cache_range: sva not page-aligned"));
> -             KASSERT((eva & PAGE_MASK) == 0,
> -                 ("pmap_invalidate_cache_range: eva not page-aligned"));
> +     sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
> +     if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
> +             /*
> +              * The supplied range is bigger than 2MB.
> +              * Globally invalidate cache.
> +              */
> +             pmap_invalidate_cache();
> +             return;
>       }
>  
> -     if ((cpu_feature & CPUID_SS) != 0 && !force)
> -             ; /* If "Self Snoop" is supported and allowed, do nothing. */
> -     else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0 &&
> -         eva - sva < PMAP_CLFLUSH_THRESHOLD) {
> -#ifdef DEV_APIC
> +     /*
> +      * XXX: Some CPUs fault, hang, or trash the local APIC
> +      * registers if we use CLFLUSH on the local APIC
> +      * range.  The local APIC is always uncached, so we
> +      * don't need to flush for that range anyway.
> +      */
> +     if (pmap_kextract(sva) == lapic_paddr)
> +             return;
> +
> +     if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
>               /*
> -              * XXX: Some CPUs fault, hang, or trash the local APIC
> -              * registers if we use CLFLUSH on the local APIC
> -              * range.  The local APIC is always uncached, so we
> -              * don't need to flush for that range anyway.
> -              */
> -             if (pmap_kextract(sva) == lapic_paddr)
> -                     return;
> -#endif
> -             /*
> -              * Otherwise, do per-cache line flush.  Use the sfence
> +              * Do per-cache line flush.  Use the sfence
>                * instruction to insure that previous stores are
>                * included in the write-back.  The processor
>                * propagates flush to other processors in the cache
> @@ -1447,12 +1477,7 @@ pmap_invalidate_cache_range(vm_offset_t sva, vm_offset
>               for (; sva < eva; sva += cpu_clflush_line_size)
>                       clflushopt(sva);
>               sfence();
> -     } else if ((cpu_feature & CPUID_CLFSH) != 0 &&
> -         eva - sva < PMAP_CLFLUSH_THRESHOLD) {
> -#ifdef DEV_APIC
> -             if (pmap_kextract(sva) == lapic_paddr)
> -                     return;
> -#endif
> +     } else {
>               /*
>                * Writes are ordered by CLFLUSH on Intel CPUs.
>                */

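For context on the DEFINE_IFUNC hunk above: the resolver body runs once (during boot, after CPU identification), and the function it returns is what callers of pmap_invalidate_cache_range() get bound to, so the cpu_feature tests are no longer evaluated on every call. A rough userland illustration of the same mechanism, using the GNU ifunc attribute on an ELF target; have_self_snoop() here is a made-up stand-in for the (cpu_feature & CPUID_SS) test, not anything from the tree:

#include <stddef.h>

/* Two implementations of the same operation. */
static void
flush_selfsnoop(const void *buf, size_t len)
{
	/* Self-snooping CPU: nothing to do. */
	(void)buf;
	(void)len;
}

static void
flush_clflush(const void *buf, size_t len)
{
	/* Stand-in for a per-cache-line CLFLUSH loop. */
	(void)buf;
	(void)len;
}

/* Placeholder for the real feature test (cpu_feature & CPUID_SS). */
static int
have_self_snoop(void)
{

	return (0);
}

/* Resolver: runs once at load time and picks an implementation. */
static void (*resolve_flush(void))(const void *, size_t)
{

	return (have_self_snoop() ? flush_selfsnoop : flush_clflush);
}

/*
 * Calls to flush_range() are bound to the resolver's choice; no
 * per-call branch on CPU features remains.
 */
void flush_range(const void *, size_t)
    __attribute__((ifunc("resolve_flush")));

DEFINE_IFUNC wraps essentially this pattern for kernel linkage.
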
The removal of the #ifdef DEV_APIC guards breaks building i386 kernels configured without "device apic":

/usr/src/sys/i386/i386/pmap.c:1465:28: error: use of undeclared identifier 'lapic_paddr'
        if (pmap_kextract(sva) == lapic_paddr)
                                  ^
1 error generated.
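
One way to restore the !DEV_APIC build would be to bring the DEV_APIC guard back around the lapic_paddr check in pmap_force_invalidate_cache_range(); an untested sketch of that part of the function (another option would be to make lapic_paddr visible in the !DEV_APIC case):

	/* Untested sketch: restore the DEV_APIC guard removed in r338807. */
	sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
	if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
		/*
		 * The supplied range is bigger than 2MB.
		 * Globally invalidate cache.
		 */
		pmap_invalidate_cache();
		return;
	}

#ifdef DEV_APIC
	/*
	 * XXX: Some CPUs fault, hang, or trash the local APIC
	 * registers if we use CLFLUSH on the local APIC range.
	 * The local APIC is always uncached, so we don't need
	 * to flush for that range anyway.
	 */
	if (pmap_kextract(sva) == lapic_paddr)
		return;
#endif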