Hello Sergej,
On 16/08/16 23:16, Sergej Proskurin wrote:
---
xen/arch/arm/p2m.c | 71 +++++++++++++++++++++++++++++++++++++++++------
xen/include/asm-arm/p2m.h | 11 ++++++++
2 files changed, 73 insertions(+), 9 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index e859fca..9ef19d4 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1245,27 +1245,53 @@ static void p2m_free_vmid(struct domain *d)
spin_unlock(&vmid_alloc_lock);
}
-void p2m_teardown(struct domain *d)
+/* Reset this p2m table to be empty. */
+void p2m_flush_table(struct p2m_domain *p2m)
{
- struct p2m_domain *p2m = p2m_get_hostp2m(d);
- struct page_info *pg;
+ struct page_info *page, *pg;
+ unsigned int i;
+
+ if ( p2m->root )
+ {
+ page = p2m->root;
+
+ /* Clear all concatenated first level pages. */
s/first/root/
+ for ( i = 0; i < P2M_ROOT_PAGES; i++ )
+ clear_and_clean_page(page + i);
Clearing a live page table like that is not safe. Each entry (64-bit)
should be written atomically to avoid breaking coherency (e.g. if the MMU
is walking the page table at the same time). However, you don't know the
implementation of clear_and_clean_page.
Also, this adds a small overhead when tearing down a p2m, because the
clear is not necessary in that case.
+ }
+
+ /*
+ * Flush TLBs before releasing remaining intermediate p2m page tables to
+ * prevent illegal access to stalled TLB entries.
+ */
+ p2m_flush_tlb(p2m);
p2m_flush_table is called in 2 places:
    - p2m_teardown_one
    - altp2m_reset

For p2m_teardown_one, the p2m may not have been allocated because the
initialization failed. So trying to flush the TLBs may lead to a panic in
Xen (the vttbr is invalid).

For altp2m_reset, this is called while the page tables are being updated
(see altp2m_propagate_change). vCPUs may still be using the page tables at
that time, so I am also a bit worried that clear_and_clean_page is not
safe to use here, for the same reason as explained above.
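
For the p2m_teardown_one case, one possible way out (an untested sketch,
not a request for this exact code) would be to bail out before the flush
when the root was never allocated:

void p2m_flush_table(struct p2m_domain *p2m)
{
    /*
     * Nothing has ever been allocated for this p2m (e.g. the
     * initialization failed half-way): there are no entries to clear
     * and the vttbr is not valid, so don't try to flush the TLBs.
     */
    if ( !p2m->root )
        return;

    /* ... rest of the function as in your patch ... */
}

This would not help the altp2m_reset case though, which is really about
modifying the table while vCPUs may still be using it.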
+ /* Free the rest of the trie pages back to the paging pool. */
while ( (pg = page_list_remove_head(&p2m->pages)) )
free_domheap_page(pg);
+ p2m->lowest_mapped_gfn = INVALID_GFN;
+ p2m->max_mapped_gfn = _gfn(0);
+}
+
+void p2m_teardown_one(struct p2m_domain *p2m)
+{
+ p2m_flush_table(p2m);
+
if ( p2m->root )
free_domheap_pages(p2m->root, P2M_ROOT_ORDER);
p2m->root = NULL;
- p2m_free_vmid(d);
+ p2m_free_vmid(p2m->domain);
+
+ p2m->vttbr = INVALID_VTTBR;
Why did you add this line?
radix_tree_destroy(&p2m->mem_access_settings, NULL);
}
-int p2m_init(struct domain *d)
+int p2m_init_one(struct domain *d, struct p2m_domain *p2m)
{
- struct p2m_domain *p2m = p2m_get_hostp2m(d);
int rc = 0;
rwlock_init(&p2m->lock);
@@ -1278,11 +1304,14 @@ int p2m_init(struct domain *d)
return rc;
p2m->max_mapped_gfn = _gfn(0);
- p2m->lowest_mapped_gfn = _gfn(ULONG_MAX);
+ p2m->lowest_mapped_gfn = INVALID_GFN;
Why this change?
p2m->domain = d;
+ p2m->access_required = false;
This is not necessary as the p2m is zeroed during the allocation.
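To illustrate (the allocation path here is hypothetical, the point is only
that the explicit assignment is redundant): if the structure is obtained
from a zeroing allocator such as xzalloc(), every field already starts out
as 0/false:

    /* Hypothetical allocation path, for illustration only. */
    struct p2m_domain *p2m = xzalloc(struct p2m_domain);

    if ( !p2m )
        return -ENOMEM;

    /* p2m->access_required is already false here, no need to set it. */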
p2m->default_access = p2m_access_rwx;
p2m->mem_access_enabled = false;
+ p2m->root = NULL;
+ p2m->vttbr = INVALID_VTTBR;
Why did you add these 2 lines?
radix_tree_init(&p2m->mem_access_settings);
/*
@@ -1293,9 +1322,33 @@ int p2m_init(struct domain *d)
p2m->clean_pte = iommu_enabled &&
!iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK);
- rc = p2m_alloc_table(d);
+ return p2m_alloc_table(d);
+}
- return rc;
+static void p2m_teardown_hostp2m(struct domain *d)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+ p2m_teardown_one(p2m);
+}
+
+void p2m_teardown(struct domain *d)
+{
+ p2m_teardown_hostp2m(d);
+}
+
+static int p2m_init_hostp2m(struct domain *d)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+ p2m->p2m_class = p2m_host;
+
+ return p2m_init_one(d, p2m);
+}
+
+int p2m_init(struct domain *d)
+{
+ return p2m_init_hostp2m(d);
}
/*
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index fa07e19..1a004ed 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -11,6 +11,8 @@
#define paddr_bits PADDR_BITS
+#define INVALID_VTTBR (0UL)
Looking at the current usage, INVALID_VTTBR is only ever set but never
tested. Moreover, the 2 places where it is used do not look necessary.
+
/* Holds the bit size of IPAs in p2m tables. */
extern unsigned int p2m_ipa_bits;
@@ -226,6 +228,15 @@ void guest_physmap_remove_page(struct domain *d,
mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);
+/* Flushes the page table held by the p2m. */
+void p2m_flush_table(struct p2m_domain *p2m);
+
+/* Initialize the p2m structure. */
+int p2m_init_one(struct domain *d, struct p2m_domain *p2m);
+
+/* Release resources held by the p2m structure. */
+void p2m_teardown_one(struct p2m_domain *p2m);
+
/*
* P2M rwlock helpers.
*/
Regards,
--
Julien Grall