kernel-xen/xen3-bug-561933_uv_pat_is_gru_range.patch

From: Jack Steiner <steiner@sgi.com>
Subject: x86: UV SGI: Don't track GRU space in PAT
References: bnc#561933, fate#306952
Patch-mainline: 2.6.33-rc1
Git-commit: fd12a0d69aee6d90fa9b9890db24368a897f8423

Commit fd12a0d69aee6d90fa9b9890db24368a897f8423 upstream.

GRU space is always mapped as WB in the page table. There is
no need to track the mappings in the PAT. This also eliminates
the "freeing invalid memtype" messages when the GRU space is unmapped.

Version 2 with changes suggested by Ingo (at least I think I understood what
he wanted).

Version 3 with changes suggested by Peter to make the new function
a member of the x86_platform structure.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Rafael J. Wysocki <rjw@suse.de>

Automatically created from "patches.arch/bug-561933_uv_pat_is_gru_range.patch" by xen-port-patches.py
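
The declarations this port relies on come from the base patch
("patches.arch/bug-561933_uv_pat_is_gru_range.patch") rather than from the
diff below. Judging by the includes the diff adds (<asm/pat.h> in
x86_init-xen.c, <asm/x86_init.h> in pat-xen.c), they amount to roughly the
following sketch; exact placement and layout are not quoted from that patch:

	/* Sketch only -- added by the base patch, not by this Xen port. */

	/* <asm/x86_init.h>: new member of struct x86_platform_ops */
	int (*is_untracked_pat_range)(u64 start, u64 end);

	/* <asm/pat.h>: prototype of the default implementation wired up below */
	extern int default_is_untracked_pat_range(u64 start, u64 end);
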
--- sle11sp1-2010-03-22.orig/arch/x86/kernel/x86_init-xen.c 2009-11-06 10:52:23.000000000 +0100
+++ sle11sp1-2010-03-22/arch/x86/kernel/x86_init-xen.c 2009-12-16 12:13:32.000000000 +0100
@@ -13,6 +13,7 @@
 #include <asm/e820.h>
 #include <asm/time.h>
 #include <asm/irq.h>
+#include <asm/pat.h>
 
 void __cpuinit x86_init_noop(void) { }
 void __init x86_init_uint_noop(unsigned int unused) { }
@@ -64,6 +65,7 @@ struct x86_init_ops x86_init __initdata
 };
 
 struct x86_platform_ops x86_platform = {
+	.is_untracked_pat_range = default_is_untracked_pat_range,
 	.calibrate_tsc = NULL,
 	.get_wallclock = mach_get_cmos_time,
 	.set_wallclock = mach_set_rtc_mmss,
--- sle11sp1-2010-03-22.orig/arch/x86/mm/pat-xen.c 2010-03-22 12:52:42.000000000 +0100
+++ sle11sp1-2010-03-22/arch/x86/mm/pat-xen.c 2010-03-22 12:52:58.000000000 +0100
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
+#include <asm/x86_init.h>
 #include <asm/pgtable.h>
 #include <asm/fcntl.h>
 #include <asm/e820.h>
@@ -372,6 +373,11 @@ static int free_ram_pages_type(u64 start
 	return 0;
 }
 
+int default_is_untracked_pat_range(u64 start, u64 end)
+{
+	return is_ISA_range(start, end);
+}
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
@@ -412,7 +418,7 @@ int reserve_memtype(u64 start, u64 end,
 	}
 
 	/* Low ISA region is always mapped WB in page table. No need to track */
-	if (is_ISA_range(start, end - 1)) {
+	if (x86_platform.is_untracked_pat_range(start, end - 1)) {
 		if (new_type)
 			*new_type = _PAGE_CACHE_WB;
 		return 0;
@@ -521,7 +527,7 @@ int free_memtype(u64 start, u64 end)
 		return 0;
 
 	/* Low ISA region is always mapped WB. No need to track */
-	if (is_ISA_range(start, end - 1))
+	if (x86_platform.is_untracked_pat_range(start, end - 1))
 		return 0;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
@@ -603,7 +609,7 @@ static unsigned long lookup_memtype(u64
 	int rettype = _PAGE_CACHE_WB;
 	struct memtype *entry;
 
-	if (is_ISA_range(paddr, paddr + PAGE_SIZE - 1))
+	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE - 1))
 		return rettype;
 
 	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
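
The payoff of the new hook sits in the native half of upstream commit
fd12a0d69aee6d90fa9b9890db24368a897f8423, which is not part of this
Xen-specific port: there, SGI UV installs its own predicate so that GRU
space, always mapped WB in the page table, bypasses memtype tracking. A
minimal sketch of that override follows; the UV-side names are recalled
from the upstream tree and should be read as illustrative, not as a quote
of this patch series:

	/* Sketch of the UV-side override (native code, not in this diff).
	 * gru_start_paddr/gru_end_paddr are filled in from UV MMRs at boot. */
	static u64 gru_start_paddr, gru_end_paddr;

	static int is_GRU_range(u64 start, u64 end)
	{
		return start >= gru_start_paddr && end <= gru_end_paddr;
	}

	static int uv_is_untracked_pat_range(u64 start, u64 end)
	{
		/* keep the old ISA exemption and also skip the GRU aperture */
		return is_ISA_range(start, end) || is_GRU_range(start, end);
	}

	/* installed early on UV systems, e.g.:
	 *	x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
	 */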