On Mon, Apr 12, 2021 at 04:12:41PM +0200, Jan Beulich wrote:
> While doing so, make the option dependent upon HVM, which really is the
> main purpose of the change.
> 
> Signed-off-by: Jan Beulich <jbeul...@suse.com>
> ---
> v2: New.
> 
> --- a/xen/arch/x86/Kconfig
> +++ b/xen/arch/x86/Kconfig
> @@ -15,7 +15,6 @@ config X86
>       select HAS_FAST_MULTIPLY
>       select HAS_IOPORTS
>       select HAS_KEXEC
> -     select HAS_MEM_PAGING
>       select HAS_NS16550
>       select HAS_PASSTHROUGH
>       select HAS_PCI
> @@ -251,6 +250,10 @@ config HYPERV_GUEST
>  
>  endif
>  
> +config MEM_PAGING
> +     bool "Xen memory paging support (UNSUPPORTED)" if UNSUPPORTED
> +     depends on HVM
> +
>  config MEM_SHARING
>       bool "Xen memory sharing support (UNSUPPORTED)" if UNSUPPORTED
>       depends on HVM
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -1932,9 +1932,11 @@ int hvm_hap_nested_page_fault(paddr_t gp
>          goto out_put_gfn;
>      }
>  
> +#ifdef CONFIG_MEM_PAGING
>      /* Check if the page has been paged out */
>      if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
>          paged = 1;
> +#endif
>  
>  #ifdef CONFIG_MEM_SHARING
>      /* Mem sharing: if still shared on write access then its enomem */
> --- a/xen/arch/x86/mm/Makefile
> +++ b/xen/arch/x86/mm/Makefile
> @@ -5,7 +5,7 @@ obj-$(CONFIG_HVM) += altp2m.o
>  obj-$(CONFIG_HVM) += guest_walk_2.o guest_walk_3.o guest_walk_4.o
>  obj-$(CONFIG_SHADOW_PAGING) += guest_walk_4.o
>  obj-$(CONFIG_MEM_ACCESS) += mem_access.o
> -obj-y += mem_paging.o
> +obj-$(CONFIG_MEM_PAGING) += mem_paging.o
>  obj-$(CONFIG_MEM_SHARING) += mem_sharing.o
>  obj-y += p2m.o p2m-pt.o
>  obj-$(CONFIG_HVM) += p2m-ept.o p2m-pod.o
> --- a/xen/arch/x86/x86_64/compat/mm.c
> +++ b/xen/arch/x86/x86_64/compat/mm.c
> @@ -155,8 +155,10 @@ int compat_arch_memory_op(unsigned long
>      case XENMEM_get_sharing_shared_pages:
>          return mem_sharing_get_nr_shared_mfns();
>  
> +#ifdef CONFIG_MEM_PAGING
>      case XENMEM_paging_op:
>          return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
> +#endif
>  
>  #ifdef CONFIG_MEM_SHARING
>      case XENMEM_sharing_op:
> --- a/xen/arch/x86/x86_64/mm.c
> +++ b/xen/arch/x86/x86_64/mm.c
> @@ -994,8 +994,10 @@ long subarch_memory_op(unsigned long cmd
>      case XENMEM_get_sharing_shared_pages:
>          return mem_sharing_get_nr_shared_mfns();
>  
> +#ifdef CONFIG_MEM_PAGING
>      case XENMEM_paging_op:
>          return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
> +#endif

I would rather create a dummy mem_paging_memop() handler for the
!CONFIG_MEM_PAGING case in asm-x86/mem_paging.h, so these call sites
don't need to be wrapped in #ifdef.
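
Something along these lines maybe (untested sketch, and the error value
used for the stub, -EOPNOTSUPP here, is just illustrative):

#ifdef CONFIG_MEM_PAGING
int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg);
#else
static inline int
mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
{
    /* Paging support compiled out: reject the memop. */
    return -EOPNOTSUPP;
}
#endif

That would allow keeping the XENMEM_paging_op cases in
compat_arch_memory_op() and subarch_memory_op() unmodified.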

> --- a/xen/include/asm-x86/mem_paging.h
> +++ b/xen/include/asm-x86/mem_paging.h
> @@ -24,6 +24,12 @@
>  
>  int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg);
>  
> +#ifdef CONFIG_MEM_PAGING
> +# define mem_paging_enabled(d) vm_event_check_ring((d)->vm_event_paging)
> +#else
> +# define mem_paging_enabled(d) false
> +#endif
> +
>  #endif /*__ASM_X86_MEM_PAGING_H__ */
>  
>  /*
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -136,11 +136,16 @@ typedef unsigned int p2m_query_t;
>  #define P2M_PAGEABLE_TYPES (p2m_to_mask(p2m_ram_rw) \
>                              | p2m_to_mask(p2m_ram_logdirty) )
>  
> +#ifdef CONFIG_MEM_PAGING
>  #define P2M_PAGING_TYPES (p2m_to_mask(p2m_ram_paging_out)        \
>                            | p2m_to_mask(p2m_ram_paged)           \
>                            | p2m_to_mask(p2m_ram_paging_in))
>  
>  #define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged))
> +#else
> +#define P2M_PAGING_TYPES 0
> +#define P2M_PAGED_TYPES 0
> +#endif

Since you don't guard the paging-related p2m types in p2m_type_t, is it
worth having P2M_{PAGING/PAGED}_TYPES diverge from those definitions?

I guess it might be required in order to let the compiler do dead code
elimination (DCE) without having to add yet more CONFIG_MEM_PAGING
guards at the call sites?
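
To illustrate what I mean (hand-waving, assuming a caller roughly along
these lines):

/* asm-x86/p2m.h */
#define p2m_is_paging(_t)   (p2m_to_mask(_t) & P2M_PAGING_TYPES)

/* a hypothetical caller */
if ( p2m_is_paging(p2mt) )
    /*
     * Constant false when P2M_PAGING_TYPES is 0, so the compiler can
     * discard the branch (and the call below) without an explicit
     * #ifdef around it.
     */
    p2m_mem_paging_populate(currd, gfn);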

I don't really have a strong opinion on whether the code should be
removed outright. IMO it's best to start by making it off by default at
build time, and then remove it in a later release?

Thanks, Roger.
