Hi George,

Thanks for the patch. I've done some work on it and attached a version
that suits our needs.
I have a few comments/questions for you and Tamas, because in v3 there was
a discussion about keeping the "if ( !mfn_valid(*mfn) || *t != p2m_ram_rw )"
check inside the new function or letting the caller do the check. If we let
the caller do it, then we need to use a bool to signal whether the result
was found in the hostp2m (this is to keep the current functionality). So
the main questions are: do we need that check at all, and where should it
live? (I've put a short sketch of the two options after the quoted mail
below.)

On 10.04.2019 19:02, George Dunlap wrote:
> On 4/9/19 1:03 PM, Alexandru Stefan ISAILA wrote:
>> This patch moves common code from p2m_set_altp2m_mem_access() and
>> p2m_change_altp2m_gfn() into one function
>>
>> Signed-off-by: Alexandru Isaila <aisa...@bitdefender.com>
>
> This patch contains a lot of behavioral changes which aren't mentioned
> or explained. For instance...
>
>> ---
>> Changes since V2:
>>         - Change var name from found_in_hostp2m to copied_from_hostp2m
>>         - Move the type check from altp2m_get_gfn_type_access() to the
>>           callers.
>> ---
>>  xen/arch/x86/mm/mem_access.c | 32 ++++++++++++----------------
>>  xen/arch/x86/mm/p2m.c        | 41 ++++++++++++++----------------------
>>  xen/include/asm-x86/p2m.h    | 19 +++++++++++++++++
>>  3 files changed, 49 insertions(+), 43 deletions(-)
>>
>> diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
>> index 56c06a4fc6..bf67ddb15a 100644
>> --- a/xen/arch/x86/mm/mem_access.c
>> +++ b/xen/arch/x86/mm/mem_access.c
>> @@ -265,31 +265,27 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
>>      unsigned int page_order;
>>      unsigned long gfn_l = gfn_x(gfn);
>>      int rc;
>> +    bool copied_from_hostp2m;
>>
>> -    mfn = ap2m->get_entry(ap2m, gfn, &t, &old_a, 0, NULL, NULL);
>> +    mfn = altp2m_get_gfn_type_access(ap2m, gfn, &t, &old_a, &page_order, &copied_from_hostp2m);
>>
>> -    /* Check host p2m if no valid entry in alternate */
>>      if ( !mfn_valid(mfn) )
>> +        return -ESRCH;
>> +
>> +    /* If this is a superpage, copy that first */
>> +    if ( page_order != PAGE_ORDER_4K && copied_from_hostp2m )
>>      {
>> +        unsigned long mask = ~((1UL << page_order) - 1);
>> +        gfn_t gfn2 = _gfn(gfn_l & mask);
>> +        mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);
>>
>> -        mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
>> -                                    P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
>> +        /* Note: currently it is not safe to remap to a shared entry */
>> +        if ( t != p2m_ram_rw )
>> +            return -ESRCH;
>>
>> -        rc = -ESRCH;
>> -        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
>> +        rc = ap2m->set_entry(ap2m, gfn2, mfn2, page_order, t, old_a, 1);
>> +        if ( rc )
>>              return rc;
>> -
>> -        /* If this is a superpage, copy that first */
>> -        if ( page_order != PAGE_ORDER_4K )
>> -        {
>> -            unsigned long mask = ~((1UL << page_order) - 1);
>> -            gfn_t gfn2 = _gfn(gfn_l & mask);
>> -            mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);
>> -
>> -            rc = ap2m->set_entry(ap2m, gfn2, mfn2, page_order, t, old_a, 1);
>> -            if ( rc )
>> -                return rc;
>> -        }
>>      }
>>
>>      /*
>> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
>> index b9bbb8f485..d38d7c29ca 100644
>> --- a/xen/arch/x86/mm/p2m.c
>> +++ b/xen/arch/x86/mm/p2m.c
>> @@ -2626,6 +2626,7 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
>>      mfn_t mfn;
>>      unsigned int page_order;
>>      int rc = -EINVAL;
>> +    bool copied_from_hostp2m;
>>
>>      if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
>>          return rc;
>> @@ -2636,7 +2637,7 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
>>      p2m_lock(hp2m);
>>      p2m_lock(ap2m);
>>
>> -    mfn = ap2m->get_entry(ap2m, old_gfn, &t, &a, 0, NULL, NULL);
>> +    mfn = altp2m_get_gfn_type_access(ap2m, old_gfn, &t, &a, &page_order, &copied_from_hostp2m);
>
> Before, if new_gfn was INVALID_GFN, then the host p2m wasn't read at
> all. Now, the hostp2m will have __get_gfn_type_access() called with
> P2M_ALLOC | P2M_UNSHARE. Is that change intentional, and if so, why?

This was requested by Tamas in v2.

>
>>
>>      if ( gfn_eq(new_gfn, INVALID_GFN) )
>>      {
>> @@ -2646,37 +2647,27 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
>>          goto out;
>>      }
>>
>> -    /* Check host p2m if no valid entry in alternate */
>> -    if ( !mfn_valid(mfn) )
>> -    {
>> -        mfn = __get_gfn_type_access(hp2m, gfn_x(old_gfn), &t, &a,
>> -                                    P2M_ALLOC, &page_order, 0);
>> +    if ( !mfn_valid(mfn) || (t != p2m_ram_rw && copied_from_hostp2m) )
>> +        goto out;
>>
>> -        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
>> -            goto out;
>> -
>> -        /* If this is a superpage, copy that first */
>> -        if ( page_order != PAGE_ORDER_4K )
>> -        {
>> -            gfn_t gfn;
>> -            unsigned long mask;
>> +    /* If this is a superpage, copy that first */
>> +    if ( page_order != PAGE_ORDER_4K && copied_from_hostp2m )
>> +    {
>> +        gfn_t gfn;
>> +        unsigned long mask;
>>
>> -            mask = ~((1UL << page_order) - 1);
>> -            gfn = _gfn(gfn_x(old_gfn) & mask);
>> -            mfn = _mfn(mfn_x(mfn) & mask);
>> +        mask = ~((1UL << page_order) - 1);
>> +        gfn = _gfn(gfn_x(old_gfn) & mask);
>> +        mfn = _mfn(mfn_x(mfn) & mask);
>>
>> -            if ( ap2m->set_entry(ap2m, gfn, mfn, page_order, t, a, 1) )
>> -                goto out;
>> -        }
>> +        if ( ap2m->set_entry(ap2m, gfn, mfn, page_order, t, a, 1) )
>> +            goto out;
>>      }
>>
>> -    mfn = ap2m->get_entry(ap2m, new_gfn, &t, &a, 0, NULL, NULL);
>> -
>> -    if ( !mfn_valid(mfn) )
>> -        mfn = hp2m->get_entry(hp2m, new_gfn, &t, &a, 0, NULL, NULL);
>> +    mfn = altp2m_get_gfn_type_access(ap2m, new_gfn, &t, &a, &page_order, &copied_from_hostp2m);
>
> Similarly here: before this patch, hp2m->get_entry() is called
> directly; after this patch, we go through __get_gfn_type_access(), which
> adds some extra code, and will also unshare / allocate. Is that
> intentional, and if so, why?
>
>>
>>      /* Note: currently it is not safe to remap to a shared entry */
>> -    if ( !mfn_valid(mfn) || (t != p2m_ram_rw) )
>> +    if ( !mfn_valid(mfn) || (t != p2m_ram_rw && copied_from_hostp2m) )
>>          goto out;
>>
>>      if ( !ap2m->set_entry(ap2m, old_gfn, mfn, PAGE_ORDER_4K, t, a,
>> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
>> index 2801a8ccca..6de1546d76 100644
>> --- a/xen/include/asm-x86/p2m.h
>> +++ b/xen/include/asm-x86/p2m.h
>> @@ -448,6 +448,25 @@ static inline mfn_t __nonnull(3) get_gfn_type(
>>      return get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, q, NULL);
>>  }
>>
>> +static inline mfn_t altp2m_get_gfn_type_access(
>> +    struct p2m_domain *ap2m, gfn_t gfn, p2m_type_t *t, p2m_access_t *a,
>> +    unsigned int *page_order, bool *copied_from_hostp2m)
>> +{
>> +    mfn_t mfn = ap2m->get_entry(ap2m, gfn, t, a, 0, NULL, NULL);
>> +
>> +    *copied_from_hostp2m = false;
>> +
>> +    /* Check host p2m if no valid entry in alternate */
>> +    if ( !mfn_valid(mfn) )
>> +    {
>> +        mfn = __get_gfn_type_access(p2m_get_hostp2m(ap2m->domain), gfn_x(gfn), t, a,
>> +                                    P2M_ALLOC | P2M_UNSHARE, page_order, false);
>> +        *copied_from_hostp2m = mfn_valid(mfn);
>> +    }
>> +
>> +    return mfn;
>> +}
>
> Given that the main goal here seems to be to clean up the interface, I'm
> not clear why you don't have this function do both the "host
> see-through" and the prepopulation. Would something like the attached
> work (not even compile tested)?
I was thinking of going with altp2m_get_entry_direct rather than
altp2m_get_entry_seethrough as the name.

> (To be clear, this is just meant to be a sketch so you can see what I'm
> talking about; if you were to use it you'd need to fix it up
> appropriately, including considering whether "seethrough" is an
> appropriate name for the function.)
>

Alex
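P.S. To make the two options from the top of this mail concrete, here is a
rough, not compile-tested sketch of how p2m_change_altp2m_gfn() would look
up new_gfn under each of them. The names are reused from v3 and from the
attached patch; altp2m_get_entry_direct is just the name I would use
instead of altp2m_get_entry_seethrough:

    /*
     * Option A: the helper only reports, via a bool, whether the entry was
     * copied from the hostp2m, and every caller keeps its own type check
     * (this is roughly what v3 did).
     */
    mfn = altp2m_get_gfn_type_access(ap2m, new_gfn, &t, &a, &page_order,
                                     &copied_from_hostp2m);

    /* Note: currently it is not safe to remap to a shared entry */
    if ( !mfn_valid(mfn) || (t != p2m_ram_rw && copied_from_hostp2m) )
        goto out;

    /*
     * Option B: the helper does the mfn_valid()/p2m_ram_rw check itself
     * and returns -ESRCH, so callers only look at rc (this is what the
     * attached patch does; altp2m_get_entry_direct would simply be the
     * non-prepopulating wrapper around get_altp2m_entry()).
     */
    rc = altp2m_get_entry_direct(ap2m, new_gfn, &mfn, &t, &a);
    if ( rc )
        goto out;

Option A keeps each caller explicit about the p2m_ram_rw restriction, at
the cost of passing the extra bool around; option B keeps the callers
shorter but hides the restriction inside the helper.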
From c1b1f554ce343763add0aad479edcd7e02fc565f Mon Sep 17 00:00:00 2001
From: Alexandru Isaila <aisa...@bitdefender.com>
Date: Thu, 11 Apr 2019 14:39:31 +0300
Subject: [PATCH] altp2m: Add a function to automatically handle
 prepopulating from the hostp2m

Signed-off-by: Alexandru Isaila <aisa...@bitdefender.com>
---
 xen/arch/x86/mm/mem_access.c | 30 ++--------------
 xen/arch/x86/mm/p2m.c        | 84 ++++++++++++++++++++++++--------------------
 xen/include/asm-x86/p2m.h    | 17 +++++++++
 3 files changed, 66 insertions(+), 65 deletions(-)

diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index a144bb0..ddfe016 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -262,35 +262,11 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
     mfn_t mfn;
     p2m_type_t t;
     p2m_access_t old_a;
-    unsigned int page_order;
-    unsigned long gfn_l = gfn_x(gfn);
     int rc;
 
-    mfn = ap2m->get_entry(ap2m, gfn, &t, &old_a, 0, NULL, NULL);
-
-    /* Check host p2m if no valid entry in alternate */
-    if ( !mfn_valid(mfn) )
-    {
-
-        mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
-                                    P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
-
-        rc = -ESRCH;
-        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
-            return rc;
-
-        /* If this is a superpage, copy that first */
-        if ( page_order != PAGE_ORDER_4K )
-        {
-            unsigned long mask = ~((1UL << page_order) - 1);
-            gfn_t gfn2 = _gfn(gfn_l & mask);
-            mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);
-
-            rc = ap2m->set_entry(ap2m, gfn2, mfn2, page_order, t, old_a, 1);
-            if ( rc )
-                return rc;
-        }
-    }
+    rc = altp2m_get_entry_prepopulate(ap2m, gfn, &mfn, &t, &old_a);
+    if ( rc )
+        return rc;
 
     /*
      * Inherit the old suppress #VE bit value if it is already set, or set it
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 9e81a30..aea200f 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -478,6 +478,43 @@ void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m)
         mm_write_unlock(&p2m->lock);
 }
 
+int get_altp2m_entry(struct p2m_domain *ap2m,
+                     gfn_t gfn, mfn_t *mfn, p2m_type_t *t,
+                     p2m_access_t *a, bool prepopulate)
+{
+    *mfn = ap2m->get_entry(ap2m, gfn, t, a, 0, NULL, NULL);
+
+    /* Check host p2m if no valid entry in alternate */
+    if ( !mfn_valid(*mfn) && !p2m_is_hostp2m(ap2m) )
+    {
+        struct p2m_domain *hp2m = p2m_get_hostp2m(ap2m->domain);
+        unsigned int page_order;
+        int rc;
+
+        *mfn = __get_gfn_type_access(hp2m, gfn_x(gfn), t, a,
+                                     P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
+
+        rc = -ESRCH;
+        if ( !mfn_valid(*mfn) || *t != p2m_ram_rw )
+            return rc;
+
+        /* If this is a superpage, copy that first */
+        if ( prepopulate && page_order != PAGE_ORDER_4K )
+        {
+            unsigned long mask = ~((1UL << page_order) - 1);
+            gfn_t gfn_aligned = _gfn(gfn_x(gfn) & mask);
+            mfn_t mfn_aligned = _mfn(mfn_x(*mfn) & mask);
+
+            rc = ap2m->set_entry(ap2m, gfn_aligned, mfn_aligned, page_order, *t, *a, 1);
+            if ( rc )
+                return rc;
+        }
+    }
+
+    return 0;
+}
+
+
 mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
                             p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                             unsigned int *page_order, bool_t locked)
@@ -2618,7 +2655,6 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
     p2m_access_t a;
     p2m_type_t t;
     mfn_t mfn;
-    unsigned int page_order;
     int rc = -EINVAL;
 
     if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
         return rc;
@@ -2630,47 +2666,21 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
     p2m_lock(hp2m);
     p2m_lock(ap2m);
 
-    mfn = ap2m->get_entry(ap2m, old_gfn, &t, &a, 0, NULL, NULL);
-
     if ( gfn_eq(new_gfn, INVALID_GFN) )
     {
+        mfn = ap2m->get_entry(ap2m, old_gfn, &t, &a, 0, NULL, NULL);
         if ( mfn_valid(mfn) )
             p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn), PAGE_ORDER_4K);
         rc = 0;
         goto out;
     }
 
-    /* Check host p2m if no valid entry in alternate */
-    if ( !mfn_valid(mfn) )
-    {
-        mfn = __get_gfn_type_access(hp2m, gfn_x(old_gfn), &t, &a,
-                                    P2M_ALLOC, &page_order, 0);
-
-        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
-            goto out;
-
-        /* If this is a superpage, copy that first */
-        if ( page_order != PAGE_ORDER_4K )
-        {
-            gfn_t gfn;
-            unsigned long mask;
-
-            mask = ~((1UL << page_order) - 1);
-            gfn = _gfn(gfn_x(old_gfn) & mask);
-            mfn = _mfn(mfn_x(mfn) & mask);
-
-            if ( ap2m->set_entry(ap2m, gfn, mfn, page_order, t, a, 1) )
-                goto out;
-        }
-    }
-
-    mfn = ap2m->get_entry(ap2m, new_gfn, &t, &a, 0, NULL, NULL);
-
-    if ( !mfn_valid(mfn) )
-        mfn = hp2m->get_entry(hp2m, new_gfn, &t, &a, 0, NULL, NULL);
+    rc = altp2m_get_entry_prepopulate(ap2m, old_gfn, &mfn, &t, &a);
+    if ( rc )
+        goto out;
 
-    /* Note: currently it is not safe to remap to a shared entry */
-    if ( !mfn_valid(mfn) || (t != p2m_ram_rw) )
+    rc = altp2m_get_entry_seethrough(ap2m, new_gfn, &mfn, &t, &a);
+    if ( rc )
         goto out;
 
     if ( !ap2m->set_entry(ap2m, old_gfn, mfn, PAGE_ORDER_4K, t, a,
@@ -3002,12 +3012,10 @@ int p2m_set_suppress_ve(struct domain *d, gfn_t gfn, bool suppress_ve,
     if ( ap2m )
         p2m_lock(ap2m);
 
-    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
-    if ( !mfn_valid(mfn) )
-    {
-        rc = -ESRCH;
+    rc = altp2m_get_entry_seethrough(p2m, gfn, &mfn, &t, &a);
+
+    if ( rc )
         goto out;
-    }
 
     rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, suppress_ve);
 
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 2801a8c..d3613c0 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -514,6 +514,23 @@ static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
     return mfn_x(mfn);
 }
 
+int get_altp2m_entry(struct p2m_domain *ap2m, gfn_t gfn, mfn_t *mfn,
+                     p2m_type_t *t, p2m_access_t *a, bool prepopulate);
+
+static inline int altp2m_get_entry_seethrough(struct p2m_domain *ap2m,
+                                              gfn_t gfn, mfn_t *mfn,
+                                              p2m_type_t *t, p2m_access_t *a)
+{
+    return get_altp2m_entry(ap2m, gfn, mfn, t, a, false);
+}
+
+static inline int altp2m_get_entry_prepopulate(struct p2m_domain *ap2m,
+                                               gfn_t gfn, mfn_t *mfn,
+                                               p2m_type_t *t, p2m_access_t *a)
+{
+    return get_altp2m_entry(ap2m, gfn, mfn, t, a, true);
+}
+
 /* Deadlock-avoidance scheme when calling get_gfn on different gfn's */
 struct two_gfns {
     struct domain *first_domain, *second_domain;
-- 
2.7.4