diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index f2040d46f095..eb5e429b1ea5 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2979,6 +2979,10 @@
 			the specified number of seconds.  This is to be used if
 			your oopses keep scrolling off the screen.
 
+	pax_sanitize_slab=
+			0/1 to disable/enable slab object sanitization (disabled by
+			default).
+
 	pcbit=		[HW,ISDN]
 
 	pcd.		[PARIDE]
diff --git a/fs/buffer.c b/fs/buffer.c
index 249b83fafe48..b51ce983ec7d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3549,7 +3549,7 @@ void __init buffer_init(void)
 	bh_cachep = kmem_cache_create("buffer_head",
 			sizeof(struct buffer_head), 0,
 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-				SLAB_MEM_SPREAD),
+				SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
 				NULL);
 
 	/*
diff --git a/fs/dcache.c b/fs/dcache.c
index 2acfc69878f5..f2f575aaa27c 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3232,7 +3232,7 @@ void __init vfs_caches_init_early(void)
 void __init vfs_caches_init(void)
 {
 	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NO_SANITIZE, 0, PATH_MAX, NULL);
 
 	dcache_init();
 	inode_init();
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 81ebd71f8c03..ccb3618c4052 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -27,6 +27,14 @@
 #define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
 /* DEBUG: Poison objects */
 #define SLAB_POISON		((slab_flags_t __force)0x00000800U)
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+/* PaX: Do not sanitize objs on free */
+#define SLAB_NO_SANITIZE	((slab_flags_t __force)0x00001000U)
+#else
+#define SLAB_NO_SANITIZE	((slab_flags_t __force)0x00000000U)
+#endif
+
 /* Align objs on cache lines */
 #define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
 /* Use GFP_DMA memory */
diff --git a/kernel/fork.c b/kernel/fork.c
index a5d21c42acfc..bdc0c9a07ae8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2271,7 +2271,7 @@ void __init proc_caches_init(void)
 			offsetof(struct mm_struct, saved_auxv),
 			sizeof_field(struct mm_struct, saved_auxv),
 			NULL);
-	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
+	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT|SLAB_NO_SANITIZE);
 	mmap_init();
 	nsproxy_cache_init();
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 8d5337fed37b..95e2be039402 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -431,10 +431,10 @@ static void anon_vma_ctor(void *data)
 void __init anon_vma_init(void)
 {
 	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
-			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
+			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT|SLAB_NO_SANITIZE,
 			anon_vma_ctor);
 	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
-			SLAB_PANIC|SLAB_ACCOUNT);
+			SLAB_PANIC|SLAB_ACCOUNT|SLAB_NO_SANITIZE);
 }
 
 /*
diff --git a/mm/slab.c b/mm/slab.c
index 2f308253c3d7..9cb22d666f59 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3507,6 +3507,17 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	if (pax_sanitize_slab) {
+		if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
+			memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
+			if (cachep->ctor)
+				cachep->ctor(objp);
+		}
+	}
+#endif
+
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
 
diff --git a/mm/slab.h b/mm/slab.h
index 68bdf498da3b..d3ba8f16b155 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -80,6 +80,15 @@ extern const struct kmalloc_info_struct {
 	unsigned int size;
 } kmalloc_info[];
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+#ifdef CONFIG_X86_64
+#define PAX_MEMORY_SANITIZE_VALUE	'\xfe'
+#else
+#define PAX_MEMORY_SANITIZE_VALUE	'\xff'
+#endif
+extern bool pax_sanitize_slab;
+#endif
+
 #ifndef CONFIG_SLOB
 /* Kmalloc array related functions */
 void setup_kmalloc_cache_index_table(void);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 65408ced18f1..fa458ea26e52 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -58,7 +58,11 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
  */
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+static bool slab_nomerge = true;
+#else
 static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
+#endif
 
 static int __init setup_slab_nomerge(char *str)
 {
@@ -81,6 +85,20 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+bool pax_sanitize_slab = false;
+static int __init pax_sanitize_slab_setup(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
+	pr_info("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
+	return 0;
+}
+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
+#endif
+
 #ifdef CONFIG_DEBUG_VM
 static int kmem_cache_sanity_check(const char *name, unsigned int size)
 {
@@ -291,7 +309,11 @@ static unsigned int calculate_alignment(slab_flags_t flags,
  */
 int slab_unmergeable(struct kmem_cache *s)
 {
-	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
+	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE)
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	    || pax_sanitize_slab
+#endif
+	    )
 		return 1;
 
 	if (!is_root_cache(s))
@@ -317,7 +339,11 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 {
 	struct kmem_cache *s;
 
-	if (slab_nomerge)
+	if (slab_nomerge
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	    || pax_sanitize_slab
+#endif
+	    )
 		return NULL;
 
 	if (ctor)
@@ -467,6 +493,11 @@ kmem_cache_create_usercopy(const char *name,
 	 */
 	flags &= CACHE_CREATE_MASK;
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	if (flags & SLAB_TYPESAFE_BY_RCU)
+		flags |= SLAB_NO_SANITIZE;
+#endif
+
 	/* Fail closed on bad usersize of useroffset values. */
 	if (WARN_ON(!usersize && useroffset) ||
 	    WARN_ON(size < usersize || size - usersize < useroffset))
diff --git a/mm/slob.c b/mm/slob.c
index 623e8a5c46ce..1f2985096f84 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -366,6 +366,11 @@ static void slob_free(void *block, int size)
 		return;
 	}
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
+		memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
+#endif
+
 	if (!slob_page_free(sp)) {
 		/* This slob page is about to become partially free. Easy! */
 		sp->units = units;
diff --git a/mm/slub.c b/mm/slub.c
index 613c8dc2f409..3ae38641924a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2985,8 +2985,25 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
 	 * to remove objects, whose reuse must be delayed.
 	 */
-	if (slab_free_freelist_hook(s, &head, &tail))
+	if (slab_free_freelist_hook(s, &head, &tail)) {
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+		if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
+			int offset = s->offset ? 0 : sizeof(void *);
+			void *x = head;
+			void *tail_obj = tail ? : head;
+
+			while (1) {
+				memset(x + offset, PAX_MEMORY_SANITIZE_VALUE, s->object_size - offset);
+				if (s->ctor)
+					s->ctor(x);
+				if (x == tail_obj)
+					break;
+				x = get_freepointer(s, x);
+			}
+		}
+#endif
 		do_slab_free(s, page, head, tail, cnt, addr);
+	}
 }
 
 #ifdef CONFIG_KASAN
@@ -3512,6 +3529,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	s->inuse = size;
 
 	if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	    (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
+#endif
 		s->ctor)) {
 		/*
 		 * Relocate free pointer after the object if it is not
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 345b51837ca8..a2888903091d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3917,14 +3917,14 @@ void __init skb_init(void)
 	skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
 					      sizeof(struct sk_buff),
 					      0,
-					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+					      SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NO_SANITIZE,
 					      offsetof(struct sk_buff, cb),
 					      sizeof_field(struct sk_buff, cb),
 					      NULL);
 
 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
 						sizeof(struct sk_buff_fclones),
 						0,
-						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+						SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NO_SANITIZE,
 						NULL);
 }
diff --git a/security/Kconfig b/security/Kconfig
index c4302067a3ad..0846636e8b96 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -6,6 +6,37 @@ menu "Security options"
 
 source security/keys/Kconfig
 
+menu "Miscellaneous hardening features"
+
+config PAX_MEMORY_SANITIZE
+	bool "Sanitize the freed memory"
+	default y
+	help
+	  By saying Y here the kernel will erase the contents of slab objects
+	  as soon as they are freed. This reduces the lifetime of data stored
+	  in them, making it less likely that sensitive information such as
+	  passwords or cryptographic secrets stays in memory for too long.
+
+	  This is especially useful for programs whose runtime is short;
+	  long-lived processes and the kernel itself benefit as well, as long
+	  as they ensure timely freeing of memory that may hold sensitive
+	  information.
+
+	  A nice side effect of slab object sanitization is the reduction of
+	  possible info leaks caused by padding bytes within leaky
+	  structures. Use-after-free bugs for structures containing pointers
+	  can also be detected, as dereferencing the sanitized pointer will
+	  generate an access violation.
+
+	  The tradeoff is a performance impact: on a single-CPU system,
+	  kernel compilation sees a 3% slowdown. Other systems and workloads
+	  may vary, so you are advised to test this feature on your expected
+	  workload before deploying it.
+
+	  Sanitization is disabled by default and can be enabled at boot with
+	  the kernel command line parameter "pax_sanitize_slab=1".
+
+endmenu
+
 config SECURITY_DMESG_RESTRICT
 	bool "Restrict unprivileged access to the kernel syslog"
 	default n
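
For cache owners the feature is purely opt-out; everything else is the existing kmem_cache API. Below is a minimal sketch, not part of the patch, of a module that creates one cache which gets sanitized on free and one which opts out, mirroring what the patch does for skbuff_head_cache, names_cache, and vm_area_struct. The cache names and struct example are hypothetical; kmem_cache_create(), kmem_cache_destroy(), and the other flags are the regular kernel API, and SLAB_NO_SANITIZE comes from this patch.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical object; any slab-allocated structure behaves the same. */
struct example {
	u8 secret[64];		/* erased on free when sanitization is active */
	struct example *next;	/* a poisoned pointer faults if used after free */
};

static struct kmem_cache *sanitized_cachep;
static struct kmem_cache *fastpath_cachep;

static int __init example_init(void)
{
	/*
	 * Default behaviour: with CONFIG_PAX_MEMORY_SANITIZE=y and
	 * pax_sanitize_slab=1 on the command line, objects freed back to
	 * this cache are overwritten with PAX_MEMORY_SANITIZE_VALUE.
	 */
	sanitized_cachep = kmem_cache_create("example_sanitized",
					     sizeof(struct example), 0,
					     SLAB_HWCACHE_ALIGN, NULL);

	/*
	 * Hot-path caches that never hold sensitive data can opt out, as
	 * the patch itself does for skbuff_head_cache and vm_area_struct.
	 */
	fastpath_cachep = kmem_cache_create("example_fastpath",
					    sizeof(struct example), 0,
					    SLAB_HWCACHE_ALIGN | SLAB_NO_SANITIZE,
					    NULL);

	if (!sanitized_cachep || !fastpath_cachep) {
		/* kmem_cache_destroy() tolerates NULL. */
		kmem_cache_destroy(sanitized_cachep);
		kmem_cache_destroy(fastpath_cachep);
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_exit(void)
{
	kmem_cache_destroy(sanitized_cachep);
	kmem_cache_destroy(fastpath_cachep);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");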
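Two design points in the patch are worth noting. Sanitization is incompatible with slab merging, since a merged cache could mix SLAB_NO_SANITIZE and sanitized users; presumably this is why slab_nomerge is forced on under CONFIG_PAX_MEMORY_SANITIZE and slab_unmergeable()/find_mergeable() additionally refuse to merge while pax_sanitize_slab is set. For the same reason SLAB_TYPESAFE_BY_RCU caches are forcibly opted out in kmem_cache_create_usercopy(), as their objects may legitimately be read after free. In SLUB, the calculate_sizes() hunk adds the sanitized case to the conditions that relocate the free pointer past the object, so the full object_size can be poisoned without clobbering the freelist link; the '\xfe' poison byte on x86-64 repeats to 0xfefefefefefefefe, a non-canonical address, which is what makes dereferencing a sanitized pointer fault as the Kconfig help describes.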