The existing ept_set_entry() and ept_get_entry() routines are extended to optionally set/get suppress_ve, and are renamed to ept_set_entry_sve() and ept_get_entry_sve(). New ept_set_entry() and ept_get_entry() routines are provided as thin wrappers around them; the set wrapper preserves suppress_ve for an existing entry and sets it for a new entry.
Additional function pointers are added to p2m_domain to allow direct access to the extended routines. Signed-off-by: Ed White <edmund.h.wh...@intel.com> --- xen/arch/x86/mm/p2m-ept.c | 40 +++++++++++++++++++++++++++++++++------- xen/include/asm-x86/p2m.h | 13 +++++++++++++ 2 files changed, 46 insertions(+), 7 deletions(-) diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c index 5de3387..e7719cf 100644 --- a/xen/arch/x86/mm/p2m-ept.c +++ b/xen/arch/x86/mm/p2m-ept.c @@ -649,14 +649,15 @@ bool_t ept_handle_misconfig(uint64_t gpa) } /* - * ept_set_entry() computes 'need_modify_vtd_table' for itself, + * ept_set_entry_sve() computes 'need_modify_vtd_table' for itself, * by observing whether any gfn->mfn translations are modified. * * Returns: 0 for success, -errno for failure */ static int -ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, - unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma) +ept_set_entry_sve(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, + unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma, + unsigned int sve) { ept_entry_t *table, *ept_entry = NULL; unsigned long gfn_remainder = gfn; @@ -802,7 +803,11 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, ept_p2m_type_to_flags(p2m, &new_entry, p2mt, p2ma); } - new_entry.suppress_ve = 1; + if ( sve != ~0 ) + new_entry.suppress_ve = !!sve; + else + new_entry.suppress_ve = is_epte_valid(&old_entry) ? 
+ old_entry.suppress_ve : 1; rc = atomic_write_ept_entry(ept_entry, new_entry, target); if ( unlikely(rc) ) @@ -847,10 +852,18 @@ out: return rc; } +static int +ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, + unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma) +{ + return ept_set_entry_sve(p2m, gfn, mfn, order, p2mt, p2ma, ~0); +} + /* Read ept p2m entries */ -static mfn_t ept_get_entry(struct p2m_domain *p2m, - unsigned long gfn, p2m_type_t *t, p2m_access_t* a, - p2m_query_t q, unsigned int *page_order) +static mfn_t ept_get_entry_sve(struct p2m_domain *p2m, + unsigned long gfn, p2m_type_t *t, p2m_access_t* a, + p2m_query_t q, unsigned int *page_order, + unsigned int *sve) { ept_entry_t *table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m))); unsigned long gfn_remainder = gfn; @@ -864,6 +877,8 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m, *t = p2m_mmio_dm; *a = p2m_access_n; + if ( sve ) + *sve = 1; /* This pfn is higher than the highest the p2m map currently holds */ if ( gfn > p2m->max_mapped_pfn ) @@ -929,6 +944,8 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m, else *t = ept_entry->sa_p2mt; *a = ept_entry->access; + if ( sve ) + *sve = ept_entry->suppress_ve; mfn = _mfn(ept_entry->mfn); if ( i ) @@ -952,6 +969,13 @@ out: return mfn; } +static mfn_t ept_get_entry(struct p2m_domain *p2m, + unsigned long gfn, p2m_type_t *t, p2m_access_t* a, + p2m_query_t q, unsigned int *page_order) +{ + return ept_get_entry_sve(p2m, gfn, t, a, q, page_order, NULL); +} + void ept_walk_table(struct domain *d, unsigned long gfn) { struct p2m_domain *p2m = p2m_get_hostp2m(d); @@ -1130,6 +1154,8 @@ int ept_p2m_init(struct p2m_domain *p2m) p2m->set_entry = ept_set_entry; p2m->get_entry = ept_get_entry; + p2m->set_entry_full = ept_set_entry_sve; + p2m->get_entry_full = ept_get_entry_sve; p2m->change_entry_type_global = ept_change_entry_type_global; p2m->change_entry_type_range = ept_change_entry_type_range; p2m->memory_type_changed = 
ept_memory_type_changed; diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h index d916891..16fd523 100644 --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -237,6 +237,19 @@ struct p2m_domain { p2m_access_t *p2ma, p2m_query_t q, unsigned int *page_order); + int (*set_entry_full)(struct p2m_domain *p2m, + unsigned long gfn, + mfn_t mfn, unsigned int page_order, + p2m_type_t p2mt, + p2m_access_t p2ma, + unsigned int sve); + mfn_t (*get_entry_full)(struct p2m_domain *p2m, + unsigned long gfn, + p2m_type_t *p2mt, + p2m_access_t *p2ma, + p2m_query_t q, + unsigned int *page_order, + unsigned int *sve); void (*enable_hardware_log_dirty)(struct p2m_domain *p2m); void (*disable_hardware_log_dirty)(struct p2m_domain *p2m); void (*flush_hardware_cached_dirty)(struct p2m_domain *p2m); -- 1.9.1 _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel