mirror of
https://abf.rosa.ru/djam/kernel-xen.git
synced 2025-02-23 07:42:46 +00:00
72 lines
1.9 KiB
Text
Subject: be more aggressive about de-activating mm-s under destruction
From: jbeulich@novell.com
Patch-mainline: obsolete

... by not only handling the current task on the CPU arch_exit_mmap()
gets executed on, but also forcing remote CPUs to do so.
--- sle11sp1-2010-03-22.orig/arch/x86/mm/pgtable-xen.c	2010-03-22 12:59:39.000000000 +0100
+++ sle11sp1-2010-03-22/arch/x86/mm/pgtable-xen.c	2010-03-22 12:59:47.000000000 +0100
@@ -1,5 +1,6 @@
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/smp.h>
 #include <xen/features.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -437,27 +438,44 @@ void arch_dup_mmap(struct mm_struct *old
 	mm_pin(mm);
 }
 
-void arch_exit_mmap(struct mm_struct *mm)
+/*
+ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas() *much*
+ * faster this way, as no hypercalls are needed for the page table updates.
+ */
+static void leave_active_mm(struct task_struct *tsk, struct mm_struct *mm)
+	__releases(tsk->alloc_lock)
 {
-	struct task_struct *tsk = current;
-
-	task_lock(tsk);
-
-	/*
-	 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-	 * *much* faster this way, as no tlb flushes means bigger wrpt batches.
-	 */
 	if (tsk->active_mm == mm) {
 		tsk->active_mm = &init_mm;
 		atomic_inc(&init_mm.mm_count);
 
 		switch_mm(mm, &init_mm, tsk);
 
-		atomic_dec(&mm->mm_count);
-		BUG_ON(atomic_read(&mm->mm_count) == 0);
+		if (atomic_dec_and_test(&mm->mm_count))
+			BUG();
 	}
 
 	task_unlock(tsk);
+}
+
+static void _leave_active_mm(void *mm)
+{
+	struct task_struct *tsk = current;
+
+	if (spin_trylock(&tsk->alloc_lock))
+		leave_active_mm(tsk, mm);
+}
+
+void arch_exit_mmap(struct mm_struct *mm)
+{
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	leave_active_mm(tsk, mm);
+
+	preempt_disable();
+	smp_call_function_many(mm_cpumask(mm), _leave_active_mm, mm, 1);
+	preempt_enable();
 
 	if (PagePinned(virt_to_page(mm->pgd))
 	    && atomic_read(&mm->mm_count) == 1