kernel-5.15/sanitize-memory.patch
Evgenii Shatokhin, commit 0260460708, 2016-10-23 22:01:06 +03:00

Updated to version 4.8.4, the first take

Besides the upstream kernel update to 4.8.x, the following changes were
made:

* BFQ was updated to v8r4
* AUFS was updated to version 4.8-20161010


diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a4f4d69..4af52d6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2888,6 +2888,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
+ pax_sanitize_slab=
+ 0/1 to disable/enable slab object sanitization (disabled by
+ default).
+
pcbit= [HW,ISDN]
pcd. [PARIDE]
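
With CONFIG_PAX_MEMORY_SANITIZE=y the sanitization code is compiled in
but stays inactive until this parameter enables it. A sketch of how one
might turn it on at boot, assuming a GRUB-based setup (the file path and
the other options shown are illustrative, not part of this patch):

    # /etc/default/grub (illustrative)
    GRUB_CMDLINE_LINUX="quiet pax_sanitize_slab=1"

After regenerating the GRUB configuration and rebooting, the kernel
should log "Enabled PaX slab sanitization" early in boot.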
diff --git a/fs/buffer.c b/fs/buffer.c
index 9c8eb9b..236a1ca 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3476,7 +3476,7 @@ void __init buffer_init(void)
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
- SLAB_MEM_SPREAD),
+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
NULL);
/*
diff --git a/fs/dcache.c b/fs/dcache.c
index 5c7cc95..4d1663e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3608,7 +3608,8 @@ void __init vfs_caches_init_early(void)
void __init vfs_caches_init(void)
{
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
+ SLAB_NO_SANITIZE, NULL);
dcache_init();
inode_init();
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 4293808..70e883a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -23,6 +23,13 @@
#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
+#else
+#define SLAB_NO_SANITIZE 0x00000000UL
+#endif
+
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
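
Since SLAB_NO_SANITIZE expands to 0UL on kernels built without
CONFIG_PAX_MEMORY_SANITIZE, callers can pass it unconditionally. A
minimal sketch of a cache opting out of sanitization; the cache name
and structure are hypothetical, not part of this patch:

    /* Objects that never hold sensitive data can skip the scrub on
     * free. SLAB_NO_SANITIZE is a no-op without
     * CONFIG_PAX_MEMORY_SANITIZE. */
    frame_cachep = kmem_cache_create("frame_cache",
                                     sizeof(struct frame), 0,
                                     SLAB_HWCACHE_ALIGN | SLAB_NO_SANITIZE,
                                     NULL);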
diff --git a/kernel/fork.c b/kernel/fork.c
index beb3172..5df26d4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1932,7 +1932,7 @@ void __init proc_caches_init(void)
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
NULL);
- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT|SLAB_NO_SANITIZE);
mmap_init();
nsproxy_cache_init();
}
diff --git a/mm/rmap.c b/mm/rmap.c
index 1ef3640..aead057 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -429,10 +429,10 @@ static void anon_vma_ctor(void *data)
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT|SLAB_NO_SANITIZE,
anon_vma_ctor);
anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
- SLAB_PANIC|SLAB_ACCOUNT);
+ SLAB_PANIC|SLAB_ACCOUNT|SLAB_NO_SANITIZE);
}
/*
diff --git a/mm/slab.c b/mm/slab.c
index b672710..570a6a5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3524,6 +3524,17 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ if (pax_sanitize_slab) {
+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
+ if (cachep->ctor)
+ cachep->ctor(objp);
+ }
+ }
+#endif
+
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
diff --git a/mm/slab.h b/mm/slab.h
index 9653f2e..47a0f7f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -71,6 +71,15 @@ extern struct list_head slab_caches;
/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+#ifdef CONFIG_X86_64
+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
+#else
+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
+#endif
+extern bool pax_sanitize_slab;
+#endif
+
unsigned long calculate_alignment(unsigned long flags,
unsigned long align, unsigned long size);
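
The fill byte is chosen so that a pointer loaded from a sanitized
object is guaranteed to be invalid: 0xfe repeated across eight bytes
gives 0xfefefefefefefefe, a non-canonical address on x86_64, so a
use-after-free dereference faults immediately. A small userspace
sketch of the effect (illustrative only, not kernel code):

    #include <stdio.h>
    #include <string.h>

    struct obj { void *next; };

    int main(void)
    {
        struct obj o;

        /* What the kernel does to the object on free. */
        memset(&o, 0xfe, sizeof(o));

        /* A use-after-free would now read back this pattern. */
        printf("stale pointer reads back as %p\n", o.next);
        return 0;
    }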
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 71f0b28..fd97b10 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -44,7 +44,11 @@ struct kmem_cache *kmem_cache;
* Merge control. If this is set then no merging of slab caches will occur.
* (Could be removed. This was introduced to pacify the merge skeptics.)
*/
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+static int slab_nomerge = 1;
+#else
static int slab_nomerge;
+#endif
static int __init setup_slab_nomerge(char *str)
{
@@ -67,6 +71,20 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
}
EXPORT_SYMBOL(kmem_cache_size);
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+bool pax_sanitize_slab = false;
+static int __init pax_sanitize_slab_setup(char *str)
+{
+{
+ if (!str)
+ return -EINVAL;
+
+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
+ pr_info("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
+ return 0;
+}
+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
+#endif
+
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
@@ -232,7 +250,11 @@ static inline void destroy_memcg_params(struct kmem_cache *s)
*/
int slab_unmergeable(struct kmem_cache *s)
{
- if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
+ if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE)
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ || pax_sanitize_slab
+#endif
+ )
return 1;
if (!is_root_cache(s))
@@ -255,7 +277,11 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
{
struct kmem_cache *s;
- if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
+ if (slab_nomerge || (flags & SLAB_NEVER_MERGE)
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ || pax_sanitize_slab
+#endif
+ )
return NULL;
if (ctor)
@@ -411,6 +437,11 @@ kmem_cache_create(const char *name, size_t size, size_t align,
* passed flags.
*/
flags &= CACHE_CREATE_MASK;
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ if (flags & SLAB_DESTROY_BY_RCU)
+ flags |= SLAB_NO_SANITIZE;
+#endif
s = __kmem_cache_alias(name, size, align, flags, ctor);
if (s)
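
SLAB_DESTROY_BY_RCU caches are forced to SLAB_NO_SANITIZE here because
their objects may still be read by lockless RCU readers until a grace
period has elapsed; scrubbing them at free time would corrupt those
readers. A sketch of the resulting behavior (cache name and structure
are hypothetical):

    /* Any RCU-managed cache is implicitly exempted by this patch. */
    conn_cachep = kmem_cache_create("conn_cache", sizeof(struct conn),
                                    0, SLAB_DESTROY_BY_RCU, NULL);
    /* Under CONFIG_PAX_MEMORY_SANITIZE, conn_cachep now also carries
     * SLAB_NO_SANITIZE, so its objects are never scrubbed on free. */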
diff --git a/mm/slob.c b/mm/slob.c
index 5ec1580..385cdbc 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -365,6 +365,11 @@ static void slob_free(void *block, int size)
return;
}
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
+#endif
+
if (!slob_page_free(sp)) {
/* This slob page is about to become partially free. Easy! */
sp->units = units;
diff --git a/mm/slub.c b/mm/slub.c
index 9adae58..56e456c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2934,6 +2934,24 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
unsigned long addr)
{
slab_free_freelist_hook(s, head, tail);
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
+ int offset = s->offset ? 0 : sizeof(void *);
+ void *tail_obj = tail ? : head;
+ void *x = head;
+
+ while (1) {
+ memset(x + offset, PAX_MEMORY_SANITIZE_VALUE, s->object_size - offset);
+ if (s->ctor)
+ s->ctor(x);
+ if (x == tail_obj)
+ break;
+ x = get_freepointer(s, x);
+ }
+ }
+#endif
+
/*
* slab_free_freelist_hook() could have put the items into quarantine.
* If so, no need to free them.
@@ -3431,6 +3449,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
s->inuse = size;
if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
+#endif
s->ctor)) {
/*
* Relocate free pointer after the object if it is not
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3864b4b6..d1dd10d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3432,12 +3432,14 @@ void __init skb_init(void)
skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
sizeof(struct sk_buff),
0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
+ SLAB_NO_SANITIZE,
NULL);
skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
sizeof(struct sk_buff_fclones),
0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
+ SLAB_NO_SANITIZE,
NULL);
}
diff --git a/security/Kconfig b/security/Kconfig
index 118f454..e2a0281 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -6,6 +6,37 @@ menu "Security options"
source security/keys/Kconfig
+menu "Miscellaneous hardening features"
+
+config PAX_MEMORY_SANITIZE
+ bool "Sanitize the freed memory"
+ default y
+ help
+ If you say Y here, the kernel will erase the contents of slab
+ objects as soon as they are freed. This reduces the lifetime of
+ data stored in them, making it less likely that sensitive
+ information such as passwords or cryptographic secrets stays in
+ memory for too long.
+
+ This is especially useful for programs whose runtime is short;
+ long-lived processes and the kernel itself benefit as well,
+ provided they free memory that may hold sensitive information in
+ a timely manner.
+
+ A nice side effect of slab object sanitization is a reduction of
+ possible information leaks caused by padding bytes within leaky
+ structures. Use-after-free bugs in structures containing pointers
+ can also be detected, as dereferencing a sanitized pointer will
+ generate an access violation.
+
+ The tradeoff is a performance impact: on a single-CPU system,
+ kernel compilation sees a 3% slowdown; other systems and workloads
+ may vary, so test this feature on your workload before deploying it.
+
+ Slab sanitization is disabled by default; enable it with the
+ kernel command line parameter "pax_sanitize_slab=1".
+endmenu
+
config SECURITY_DMESG_RESTRICT
bool "Restrict unprivileged access to the kernel syslog"
default n
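
Taken together, the feature needs both the build-time option and the
boot parameter. One plausible smoke test (commands illustrative):

    CONFIG_PAX_MEMORY_SANITIZE=y   # in .config
    pax_sanitize_slab=1            # on the kernel command line
    dmesg | grep "PaX slab"        # expect "Enabled PaX slab sanitization"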