Diffstat (limited to '0029-x86-perform-mem_sharing-teardown-before-paging-teard.patch')
-rw-r--r--  0029-x86-perform-mem_sharing-teardown-before-paging-teard.patch | 111
1 file changed, 111 insertions, 0 deletions
diff --git a/0029-x86-perform-mem_sharing-teardown-before-paging-teard.patch b/0029-x86-perform-mem_sharing-teardown-before-paging-teard.patch
new file mode 100644
index 0000000..c96f44e
--- /dev/null
+++ b/0029-x86-perform-mem_sharing-teardown-before-paging-teard.patch
@@ -0,0 +1,111 @@
+From 700320a79297fb5087f7dd540424c468b2d2cffe Mon Sep 17 00:00:00 2001
+From: Tamas K Lengyel <tamas@tklengyel.com>
+Date: Fri, 3 Mar 2023 08:14:25 +0100
+Subject: [PATCH 29/61] x86: perform mem_sharing teardown before paging
+ teardown
+
+An assert failure has been observed in p2m_teardown when performing VM
+forking and then destroying the forked VM (p2m-basic.c:173). The assert
+checks whether the domain's shared-pages counter is 0. According to the
+patch that originally added the assert (7bedbbb5c31), p2m_teardown should
+only happen after mem_sharing has already relinquished all shared pages.
+
+In this patch we flip the order in which the relinquish ops are called to
+avoid tripping the assert. Conceptually it also makes sense for sharing to
+be torn down before paging is torn down.
+
+Fixes: e7aa55c0aab3 ("x86/p2m: free the paging memory pool preemptively")
+Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+master commit: 2869349f0cb3a89dcbf1f1b30371f58df6309312
+master date: 2023-02-23 12:35:48 +0100
+---
+ xen/arch/x86/domain.c | 56 ++++++++++++++++++++++---------------------
+ 1 file changed, 29 insertions(+), 27 deletions(-)
+
+diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
+index 3080cde62b..6eeb248908 100644
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -2343,9 +2343,9 @@ int domain_relinquish_resources(struct domain *d)
+
+ enum {
+ PROG_iommu_pagetables = 1,
++ PROG_shared,
+ PROG_paging,
+ PROG_vcpu_pagetables,
+- PROG_shared,
+ PROG_xen,
+ PROG_l4,
+ PROG_l3,
+@@ -2364,6 +2364,34 @@ int domain_relinquish_resources(struct domain *d)
+ if ( ret )
+ return ret;
+
++#ifdef CONFIG_MEM_SHARING
++ PROGRESS(shared):
++
++ if ( is_hvm_domain(d) )
++ {
++ /*
++ * If the domain has shared pages, relinquish them allowing
++ * for preemption.
++ */
++ ret = relinquish_shared_pages(d);
++ if ( ret )
++ return ret;
++
++ /*
++ * If the domain is forked, decrement the parent's pause count
++ * and release the domain.
++ */
++ if ( mem_sharing_is_fork(d) )
++ {
++ struct domain *parent = d->parent;
++
++ d->parent = NULL;
++ domain_unpause(parent);
++ put_domain(parent);
++ }
++ }
++#endif
++
+ PROGRESS(paging):
+
+ /* Tear down paging-assistance stuff. */
+@@ -2404,32 +2432,6 @@ int domain_relinquish_resources(struct domain *d)
+ d->arch.auto_unmask = 0;
+ }
+
+-#ifdef CONFIG_MEM_SHARING
+- PROGRESS(shared):
+-
+- if ( is_hvm_domain(d) )
+- {
+- /* If the domain has shared pages, relinquish them allowing
+- * for preemption. */
+- ret = relinquish_shared_pages(d);
+- if ( ret )
+- return ret;
+-
+- /*
+- * If the domain is forked, decrement the parent's pause count
+- * and release the domain.
+- */
+- if ( mem_sharing_is_fork(d) )
+- {
+- struct domain *parent = d->parent;
+-
+- d->parent = NULL;
+- domain_unpause(parent);
+- put_domain(parent);
+- }
+- }
+-#endif
+-
+ spin_lock(&d->page_alloc_lock);
+ page_list_splice(&d->arch.relmem_list, &d->page_list);
+ INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
+--
+2.40.0
+
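
The reordering above works because domain_relinquish_resources() is a
restartable state machine: each phase records its PROG_* enumerator in
d->arch.rel_priv via the PROGRESS() case-label macro before running, and a
phase with work left returns -ERESTART so the hypercall can be preempted and
re-entered at that same phase. Since the enum order must match the order of
the code blocks, the patch has to move both the PROG_shared enumerator and
its block in tandem. Below is a minimal standalone sketch of that pattern,
assuming simplified stand-ins (struct domain, shr_pages,
relinquish_shared_pages) rather than Xen's real declarations:

/*
 * Minimal standalone sketch (not Xen's real code) of the restartable
 * teardown pattern the patch relies on.  struct domain, shr_pages and
 * relinquish_shared_pages() are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#ifndef ERESTART
#define ERESTART 85   /* "restart the call"; Xen phases return -ERESTART */
#endif

struct domain {
    int rel_priv;            /* resume point: last phase entered */
    long shr_pages;          /* shared pages still to be released */
    bool paging_torn_down;
};

/* Enum order must match the order of the blocks below. */
enum {
    PROG_none = 0,
    PROG_shared,             /* now runs before ... */
    PROG_paging,             /* ... the paging pool is freed */
    PROG_done,
};

/* Record progress, then fall through into the phase's case label. */
#define PROGRESS(x) \
    d->rel_priv = PROG_ ## x; /* fall through */ case PROG_ ## x

/* Each call releases a bounded batch, then asks to be restarted. */
static int relinquish_shared_pages(struct domain *d)
{
    long batch = 16;

    while ( d->shr_pages && batch-- )
        d->shr_pages--;

    return d->shr_pages ? -ERESTART : 0;
}

static int domain_relinquish_resources(struct domain *d)
{
    int ret;

    switch ( d->rel_priv )
    {
    case PROG_none:

    PROGRESS(shared):
        /* Preemptible: on -ERESTART we re-enter at this very phase. */
        ret = relinquish_shared_pages(d);
        if ( ret )
            return ret;

    PROGRESS(paging):
        /* Safe only now: the shared-pages counter has reached zero. */
        d->paging_torn_down = true;

    PROGRESS(done):
        break;
    }

    return 0;
}

int main(void)
{
    struct domain d = { .rel_priv = PROG_none, .shr_pages = 40 };

    /* The caller's retry loop; in Xen this is a hypercall continuation. */
    while ( domain_relinquish_resources(&d) == -ERESTART )
        ;

    printf("shr_pages=%ld paging_torn_down=%d\n",
           d.shr_pages, (int)d.paging_torn_down);
    return 0;
}

The caller's only contract is "retry on -ERESTART"; keeping the resume
points valid is what forces the enumerator and its code block to move
together, which is exactly what the two hunks in the patch do.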