author | Natanael Copa <ncopa@alpinelinux.org> | 2014-11-02 15:01:24 +0000
---|---|---
committer | Natanael Copa <ncopa@alpinelinux.org> | 2014-11-03 07:09:57 +0000
commit | 825c3f169809cd05badb80f982c91cf891d7b19c (patch) |
tree | c75f0c3c7110f0fa902d1e7668c74120cf1be8c9 /main/linux-grsec |
parent | 69e0d8be40255e8d5dc2ae0976765fb18397a344 (diff) |
download | aports-825c3f169809cd05badb80f982c91cf891d7b19c.tar.bz2 aports-825c3f169809cd05badb80f982c91cf891d7b19c.tar.xz |
main/linux-grsec: upgrade to 3.14.23
Diffstat (limited to 'main/linux-grsec')
-rw-r--r-- | main/linux-grsec/APKBUILD | 18
-rw-r--r-- | main/linux-grsec/grsecurity-3.0-3.14.23-201410312212.patch (renamed from main/linux-grsec/grsecurity-3.0-3.14.22-201410192047.patch) | 1074
2 files changed, 824 insertions, 268 deletions
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD index fb2d9bc2c1..5a74c96a7b 100644 --- a/main/linux-grsec/APKBUILD +++ b/main/linux-grsec/APKBUILD @@ -2,12 +2,12 @@ _flavor=grsec pkgname=linux-${_flavor} -pkgver=3.14.22 +pkgver=3.14.23 case $pkgver in *.*.*) _kernver=${pkgver%.*};; *.*) _kernver=${pkgver};; esac -pkgrel=1 +pkgrel=0 pkgdesc="Linux kernel with grsecurity" url=http://grsecurity.net depends="mkinitfs linux-firmware" @@ -17,7 +17,7 @@ _config=${config:-kernelconfig.${CARCH}} install= source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz - grsecurity-3.0-3.14.22-201410192047.patch + grsecurity-3.0-3.14.23-201410312212.patch fix-memory-map-for-PIE-applications.patch imx6q-no-unclocked-sleep.patch @@ -165,24 +165,24 @@ dev() { } md5sums="b621207b3f6ecbb67db18b13258f8ea8 linux-3.14.tar.xz -6634fc5051468ef7ff96187edc108825 patch-3.14.22.xz -2a930c98841c849c7517828395d2583f grsecurity-3.0-3.14.22-201410192047.patch +45a2b9fbe6c9075093fb015f818b4e37 patch-3.14.23.xz +0de7fd3ed253841e486817250f09dfee grsecurity-3.0-3.14.23-201410312212.patch c6a4ae7e8ca6159e1631545515805216 fix-memory-map-for-PIE-applications.patch 1a307fc1d63231bf01d22493a4f14378 imx6q-no-unclocked-sleep.patch 870b91f0eb07294ba453ac61b052c0b6 kernelconfig.x86 38b50cd1a7670f886c5e9fe9f1f91496 kernelconfig.x86_64 3d79d27ce4aea637042bb70055c35a3d kernelconfig.armhf" sha256sums="61558aa490855f42b6340d1a1596be47454909629327c49a5e4e10268065dffa linux-3.14.tar.xz -459d9a5d38d496a6448c896e39c342c71fee29c49da38192104d3acc4f0cdd43 patch-3.14.22.xz -816f9fee2e551b16a20aff3123325194299c03f8a397539fa72d2654016bd538 grsecurity-3.0-3.14.22-201410192047.patch +451199487f3e311ff57729f9104c23eeab1db528f15f2091da74cb2fd565f56e patch-3.14.23.xz +55333d8467e557925bb0116c6ff92ba39c075374a12b7125970364389182f0a5 grsecurity-3.0-3.14.23-201410312212.patch 500f3577310be52e87b9fecdc2e9c4ca43210fd97d69089f9005d484563f74c7 fix-memory-map-for-PIE-applications.patch 21179fbb22a5b74af0a609350ae1a170e232908572b201d02e791d2ce0a685d3 imx6q-no-unclocked-sleep.patch bf953a65ba047b5316509da5bc7a6dbcee12767e343d26e8360369d27bfdbe78 kernelconfig.x86 d555a01f2b464e20cfa71c67ea6d571f80c707c5a3fea33879de09b085e2d7b6 kernelconfig.x86_64 a2dc0e30e1d1d691768543a17b51efccfc11ef17c04ac08f2b54c95f25dab75d kernelconfig.armhf" sha512sums="5730d83a7a81134c1e77c0bf89e42dee4f8251ad56c1ac2be20c59e26fdfaa7bea55f277e7af156b637f22e1584914a46089af85039177cb43485089c74ac26e linux-3.14.tar.xz -ccd02031badafe9c981cfc65d10eee674f76cd8bbcfd8d9765ec057b87dcb7d56583fb2b75eb0a6d14fa7aa028e15061aa79fe1618b40fb79dae6c0479e9202b patch-3.14.22.xz -8a673850de30772dedd1323fdaab02e3c0ad15669c9330c1b64b485b6b2153e651915e221f9a8f7d96098540b4aa95a15fd65a0e9a1e7c7b29a49c927e4dd448 grsecurity-3.0-3.14.22-201410192047.patch +31883f947d93e8b489f75d3508efab24f3d5c94f75f6f0e66e34ad8f54de2511eb22e92b8a27bde19bba3c1a510435f3ba181157bdef726120226eba18bd825a patch-3.14.23.xz +7f17d47ffc78e23a80b84921742cfbbc9afff551ad75bfbb4e1399aba6eca6fd8c8b6262232d57cd6c7165ba2bafe80c7f6e1689e495fa3a61740930808a3d53 grsecurity-3.0-3.14.23-201410312212.patch 4665c56ae1bbac311f9205d64918e84ee8b01d47d6e2396ff6b8adfb10aada7f7254531ce62e31edbb65c2a54a830f09ad05d314dfcd75d6272f4068945ad7c7 fix-memory-map-for-PIE-applications.patch 87d1ad59732f265a5b0db54490dc1762c14ea4b868e7eb1aedc3ce57b48046de7bbc08cf5cfcf6f1380fa84063b0edb16ba3d5e3c5670be9bbb229275c88b221 imx6q-no-unclocked-sleep.patch 
dde402be39f68955f9395f807631f1457e90cda76a80e0e198695c8f946cdba02a00fe12a59a77bf5e8b40f5ecb52efbe364449f3e58d8996f27e07b719ac6a4 kernelconfig.x86 diff --git a/main/linux-grsec/grsecurity-3.0-3.14.22-201410192047.patch b/main/linux-grsec/grsecurity-3.0-3.14.23-201410312212.patch index 8d0df77a72..2b0f9bd7fc 100644 --- a/main/linux-grsec/grsecurity-3.0-3.14.22-201410192047.patch +++ b/main/linux-grsec/grsecurity-3.0-3.14.23-201410312212.patch @@ -235,7 +235,7 @@ index b89a739..e289b9b 100644 +zconf.lex.c zoffset.h diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt -index 7116fda..d8ed6e8 100644 +index 7116fda..2f71588 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1084,6 +1084,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. @@ -260,7 +260,7 @@ index 7116fda..d8ed6e8 100644 nosmap [X86] Disable SMAP (Supervisor Mode Access Prevention) even if it is supported by processor. -@@ -2347,6 +2355,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted. +@@ -2347,6 +2355,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted. the specified number of seconds. This is to be used if your oopses keep scrolling off the screen. @@ -270,8 +270,13 @@ index 7116fda..d8ed6e8 100644 + page table updates on X86-64. + + pax_sanitize_slab= -+ 0/1 to disable/enable slab object sanitization (enabled by -+ default). ++ Format: { 0 | 1 | off | fast | full } ++ Options '0' and '1' are only provided for backward ++ compatibility, 'off' or 'fast' should be used instead. ++ 0|off : disable slab object sanitization ++ 1|fast: enable slab object sanitization excluding ++ whitelisted slabs (default) ++ full : sanitize all slabs, even the whitelisted ones + + pax_softmode= 0/1 to disable/enable PaX softmode on boot already. + @@ -287,7 +292,7 @@ index 7116fda..d8ed6e8 100644 pcd. [PARIDE] diff --git a/Makefile b/Makefile -index a59980e..46601e4 100644 +index 135a04a..79b5e32 100644 --- a/Makefile +++ b/Makefile @@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ @@ -854,6 +859,22 @@ index 98838a0..b304fb4 100644 } else if (!cause) { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) +diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c +index a2ff5c5..ecf6a78 100644 +--- a/arch/arc/kernel/kgdb.c ++++ b/arch/arc/kernel/kgdb.c +@@ -158,11 +158,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, + return -1; + } + +-unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs) +-{ +- return instruction_pointer(regs); +-} +- + int kgdb_arch_init(void) + { + single_step_data.armed = 0; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 4733d32..b142a40 100644 --- a/arch/arm/Kconfig @@ -876,10 +897,10 @@ index 4733d32..b142a40 100644 kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. 
It is like a reboot diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h -index 62d2cb5..0d7f7f5 100644 +index 62d2cb5..26e43ca 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h -@@ -18,17 +18,35 @@ +@@ -18,17 +18,41 @@ #include <asm/barrier.h> #include <asm/cmpxchg.h> @@ -891,6 +912,12 @@ index 62d2cb5..0d7f7f5 100644 #ifdef __KERNEL__ ++#ifdef CONFIG_THUMB2_KERNEL ++#define REFCOUNT_TRAP_INSN "bkpt 0xf1" ++#else ++#define REFCOUNT_TRAP_INSN "bkpt 0xf103" ++#endif ++ +#define _ASM_EXTABLE(from, to) \ +" .pushsection __ex_table,\"a\"\n"\ +" .align 3\n" \ @@ -915,7 +942,7 @@ index 62d2cb5..0d7f7f5 100644 #if __LINUX_ARM_ARCH__ >= 6 -@@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v) +@@ -44,6 +68,36 @@ static inline void atomic_add(int i, atomic_t *v) prefetchw(&v->counter); __asm__ __volatile__("@ atomic_add\n" @@ -924,7 +951,7 @@ index 62d2cb5..0d7f7f5 100644 + +#ifdef CONFIG_PAX_REFCOUNT +" bvc 3f\n" -+"2: bkpt 0xf103\n" ++"2: " REFCOUNT_TRAP_INSN "\n" +"3:\n" +#endif + @@ -952,7 +979,7 @@ index 62d2cb5..0d7f7f5 100644 "1: ldrex %0, [%3]\n" " add %0, %0, %4\n" " strex %1, %0, [%3]\n" -@@ -62,6 +110,42 @@ static inline int atomic_add_return(int i, atomic_t *v) +@@ -62,6 +116,42 @@ static inline int atomic_add_return(int i, atomic_t *v) smp_mb(); __asm__ __volatile__("@ atomic_add_return\n" @@ -962,7 +989,7 @@ index 62d2cb5..0d7f7f5 100644 +#ifdef CONFIG_PAX_REFCOUNT +" bvc 3f\n" +" mov %0, %1\n" -+"2: bkpt 0xf103\n" ++"2: " REFCOUNT_TRAP_INSN "\n" +"3:\n" +#endif + @@ -995,7 +1022,7 @@ index 62d2cb5..0d7f7f5 100644 "1: ldrex %0, [%3]\n" " add %0, %0, %4\n" " strex %1, %0, [%3]\n" -@@ -83,6 +167,36 @@ static inline void atomic_sub(int i, atomic_t *v) +@@ -83,6 +173,36 @@ static inline void atomic_sub(int i, atomic_t *v) prefetchw(&v->counter); __asm__ __volatile__("@ atomic_sub\n" @@ -1004,7 +1031,7 @@ index 62d2cb5..0d7f7f5 100644 + +#ifdef CONFIG_PAX_REFCOUNT +" bvc 3f\n" -+"2: bkpt 0xf103\n" ++"2: " REFCOUNT_TRAP_INSN "\n" +"3:\n" +#endif + @@ -1032,7 +1059,7 @@ index 62d2cb5..0d7f7f5 100644 "1: ldrex %0, [%3]\n" " sub %0, %0, %4\n" " strex %1, %0, [%3]\n" -@@ -101,11 +215,25 @@ static inline int atomic_sub_return(int i, atomic_t *v) +@@ -101,11 +221,25 @@ static inline int atomic_sub_return(int i, atomic_t *v) smp_mb(); __asm__ __volatile__("@ atomic_sub_return\n" @@ -1044,7 +1071,7 @@ index 62d2cb5..0d7f7f5 100644 +#ifdef CONFIG_PAX_REFCOUNT +" bvc 3f\n" +" mov %0, %1\n" -+"2: bkpt 0xf103\n" ++"2: " REFCOUNT_TRAP_INSN "\n" +"3:\n" +#endif + @@ -1060,7 +1087,7 @@ index 62d2cb5..0d7f7f5 100644 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "Ir" (i) : "cc"); -@@ -138,6 +266,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) +@@ -138,6 +272,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) return oldval; } @@ -1089,7 +1116,7 @@ index 62d2cb5..0d7f7f5 100644 #else /* ARM_ARCH_6 */ #ifdef CONFIG_SMP -@@ -156,7 +306,17 @@ static inline int atomic_add_return(int i, atomic_t *v) +@@ -156,7 +312,17 @@ static inline int atomic_add_return(int i, atomic_t *v) return val; } @@ -1107,7 +1134,7 @@ index 62d2cb5..0d7f7f5 100644 static inline int atomic_sub_return(int i, atomic_t *v) { -@@ -171,6 +331,10 @@ static inline int atomic_sub_return(int i, atomic_t *v) +@@ -171,6 +337,10 @@ static inline int atomic_sub_return(int i, atomic_t *v) return val; } #define atomic_sub(i, v) (void) atomic_sub_return(i, v) @@ -1118,7 +1145,7 @@ index 62d2cb5..0d7f7f5 100644 static 
inline int atomic_cmpxchg(atomic_t *v, int old, int new) { -@@ -186,9 +350,18 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +@@ -186,9 +356,18 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } @@ -1137,7 +1164,7 @@ index 62d2cb5..0d7f7f5 100644 static inline int __atomic_add_unless(atomic_t *v, int a, int u) { -@@ -201,11 +374,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) +@@ -201,11 +380,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) } #define atomic_inc(v) atomic_add(1, v) @@ -1165,7 +1192,7 @@ index 62d2cb5..0d7f7f5 100644 #define atomic_dec_return(v) (atomic_sub_return(1, v)) #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) -@@ -221,6 +410,14 @@ typedef struct { +@@ -221,6 +416,14 @@ typedef struct { long long counter; } atomic64_t; @@ -1180,7 +1207,7 @@ index 62d2cb5..0d7f7f5 100644 #define ATOMIC64_INIT(i) { (i) } #ifdef CONFIG_ARM_LPAE -@@ -237,6 +434,19 @@ static inline long long atomic64_read(const atomic64_t *v) +@@ -237,6 +440,19 @@ static inline long long atomic64_read(const atomic64_t *v) return result; } @@ -1200,7 +1227,7 @@ index 62d2cb5..0d7f7f5 100644 static inline void atomic64_set(atomic64_t *v, long long i) { __asm__ __volatile__("@ atomic64_set\n" -@@ -245,6 +455,15 @@ static inline void atomic64_set(atomic64_t *v, long long i) +@@ -245,6 +461,15 @@ static inline void atomic64_set(atomic64_t *v, long long i) : "r" (&v->counter), "r" (i) ); } @@ -1216,7 +1243,7 @@ index 62d2cb5..0d7f7f5 100644 #else static inline long long atomic64_read(const atomic64_t *v) { -@@ -259,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v) +@@ -259,6 +484,19 @@ static inline long long atomic64_read(const atomic64_t *v) return result; } @@ -1236,7 +1263,7 @@ index 62d2cb5..0d7f7f5 100644 static inline void atomic64_set(atomic64_t *v, long long i) { long long tmp; -@@ -273,6 +505,21 @@ static inline void atomic64_set(atomic64_t *v, long long i) +@@ -273,6 +511,21 @@ static inline void atomic64_set(atomic64_t *v, long long i) : "r" (&v->counter), "r" (i) : "cc"); } @@ -1258,7 +1285,7 @@ index 62d2cb5..0d7f7f5 100644 #endif static inline void atomic64_add(long long i, atomic64_t *v) -@@ -284,6 +531,37 @@ static inline void atomic64_add(long long i, atomic64_t *v) +@@ -284,6 +537,37 @@ static inline void atomic64_add(long long i, atomic64_t *v) __asm__ __volatile__("@ atomic64_add\n" "1: ldrexd %0, %H0, [%3]\n" " adds %Q0, %Q0, %Q4\n" @@ -1266,7 +1293,7 @@ index 62d2cb5..0d7f7f5 100644 + +#ifdef CONFIG_PAX_REFCOUNT +" bvc 3f\n" -+"2: bkpt 0xf103\n" ++"2: " REFCOUNT_TRAP_INSN "\n" +"3:\n" +#endif + @@ -1296,7 +1323,7 @@ index 62d2cb5..0d7f7f5 100644 " adc %R0, %R0, %R4\n" " strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" -@@ -303,6 +581,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v) +@@ -303,6 +587,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v) __asm__ __volatile__("@ atomic64_add_return\n" "1: ldrexd %0, %H0, [%3]\n" " adds %Q0, %Q0, %Q4\n" @@ -1306,7 +1333,7 @@ index 62d2cb5..0d7f7f5 100644 +" bvc 3f\n" +" mov %0, %1\n" +" mov %H0, %H1\n" -+"2: bkpt 0xf103\n" ++"2: " REFCOUNT_TRAP_INSN "\n" +"3:\n" +#endif + @@ -1341,7 +1368,7 @@ index 62d2cb5..0d7f7f5 100644 " adc %R0, %R0, %R4\n" " strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" -@@ -325,6 +641,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v) +@@ -325,6 +647,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v) __asm__ 
__volatile__("@ atomic64_sub\n" "1: ldrexd %0, %H0, [%3]\n" " subs %Q0, %Q0, %Q4\n" @@ -1349,7 +1376,7 @@ index 62d2cb5..0d7f7f5 100644 + +#ifdef CONFIG_PAX_REFCOUNT +" bvc 3f\n" -+"2: bkpt 0xf103\n" ++"2: " REFCOUNT_TRAP_INSN "\n" +"3:\n" +#endif + @@ -1379,7 +1406,7 @@ index 62d2cb5..0d7f7f5 100644 " sbc %R0, %R0, %R4\n" " strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" -@@ -344,10 +691,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v) +@@ -344,10 +697,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v) __asm__ __volatile__("@ atomic64_sub_return\n" "1: ldrexd %0, %H0, [%3]\n" " subs %Q0, %Q0, %Q4\n" @@ -1390,7 +1417,7 @@ index 62d2cb5..0d7f7f5 100644 +" bvc 3f\n" +" mov %0, %1\n" +" mov %H0, %H1\n" -+"2: bkpt 0xf103\n" ++"2: " REFCOUNT_TRAP_INSN "\n" +"3:\n" +#endif + @@ -1406,7 +1433,7 @@ index 62d2cb5..0d7f7f5 100644 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "r" (i) : "cc"); -@@ -382,6 +744,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, +@@ -382,6 +750,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, return oldval; } @@ -1438,7 +1465,7 @@ index 62d2cb5..0d7f7f5 100644 static inline long long atomic64_xchg(atomic64_t *ptr, long long new) { long long result; -@@ -406,20 +793,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new) +@@ -406,20 +799,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new) static inline long long atomic64_dec_if_positive(atomic64_t *v) { long long result; @@ -1459,7 +1486,7 @@ index 62d2cb5..0d7f7f5 100644 +" bvc 3f\n" +" mov %Q0, %Q1\n" +" mov %R0, %R1\n" -+"2: bkpt 0xf103\n" ++"2: " REFCOUNT_TRAP_INSN "\n" +"3:\n" +#endif + @@ -1479,7 +1506,7 @@ index 62d2cb5..0d7f7f5 100644 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter) : "cc"); -@@ -442,13 +843,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) +@@ -442,13 +849,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) " teq %0, %5\n" " teqeq %H0, %H5\n" " moveq %1, #0\n" @@ -1491,7 +1518,7 @@ index 62d2cb5..0d7f7f5 100644 + +#ifdef CONFIG_PAX_REFCOUNT +" bvc 3f\n" -+"2: bkpt 0xf103\n" ++"2: " REFCOUNT_TRAP_INSN "\n" +"3:\n" +#endif + @@ -1508,7 +1535,7 @@ index 62d2cb5..0d7f7f5 100644 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "r" (u), "r" (a) : "cc"); -@@ -461,10 +874,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) +@@ -461,10 +880,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) #define atomic64_inc(v) atomic64_add(1LL, (v)) @@ -3916,7 +3943,7 @@ index 6eb97b3..ac509f6 100644 atomic64_set(&mm->context.id, asid); } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c -index eb8830a..5360ce7 100644 +index eb8830a..e8ff52e 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -25,6 +25,7 @@ @@ -4030,7 +4057,7 @@ index eb8830a..5360ce7 100644 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n", inf->name, fsr, addr); -@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs * +@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs * ifsr_info[nr].name = name; } @@ -4110,9 +4137,15 @@ index eb8830a..5360ce7 100644 + +#ifdef CONFIG_PAX_REFCOUNT + if 
(fsr_fs(ifsr) == FAULT_CODE_DEBUG) { ++#ifdef CONFIG_THUMB2_KERNEL ++ unsigned short bkpt; ++ ++ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) { ++#else + unsigned int bkpt; + + if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) { ++#endif + current->thread.error_code = ifsr; + current->thread.trap_no = 0; + pax_report_refcount_overflow(regs); @@ -9859,16 +9892,16 @@ index 9b1c36d..209298b 100644 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h -index bcfe063..b333142 100644 +index 2c8d41f..06b1206 100644 --- a/arch/sparc/include/asm/pgalloc_64.h +++ b/arch/sparc/include/asm/pgalloc_64.h -@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +@@ -38,6 +38,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd) } - #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) + #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD) +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD)) - static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h index 59ba6f6..4518128 100644 @@ -10053,22 +10086,19 @@ index 96efa7a..16858bf 100644 /* diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h -index a5f01ac..a8811dd 100644 +index cc6275c..7eb8e21 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h -@@ -63,7 +63,10 @@ struct thread_info { +@@ -63,6 +63,8 @@ struct thread_info { struct pt_regs *kern_una_regs; unsigned int kern_una_insn; -- unsigned long fpregs[0] __attribute__ ((aligned(64))); + unsigned long lowest_stack; + -+ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)] -+ __attribute__ ((aligned(64))); + unsigned long fpregs[(7 * 256) / sizeof(unsigned long)] + __attribute__ ((aligned(64))); }; - - #endif /* !(__ASSEMBLY__) */ -@@ -188,12 +191,13 @@ register struct thread_info *current_thread_info_reg asm("g6"); +@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ /* flag bit 4 is available */ #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */ @@ -10083,7 +10113,7 @@ index a5f01ac..a8811dd 100644 /* NOTE: Thread flags >= 12 should be ones we have no interest * in using in assembly, else we can't use the mask as * an immediate value in instructions such as andcc. 
-@@ -213,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6"); +@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) @@ -10258,7 +10288,7 @@ index 510baec..9ff2607 100644 } while (++count < 16); printk("\n"); diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c -index d7b4967..2edf827 100644 +index c6f7113..9299700 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs) @@ -10350,7 +10380,7 @@ index c13c9f2..d572c34 100644 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c -index 8416d7f..f83823c 100644 +index 50c3dd03..adff164 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -870,8 +870,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah; @@ -10660,7 +10690,7 @@ index 6629829..036032d 100644 } diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c -index 4ced92f..965eeed 100644 +index 25d0c7e..b571456 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -77,7 +77,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p) @@ -10778,8 +10808,8 @@ index 4ced92f..965eeed 100644 + atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt); } - unsigned long sun4v_err_itlb_vaddr; -@@ -2114,9 +2125,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl) + static void sun4v_tlb_error(struct pt_regs *regs) +@@ -2118,9 +2129,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl) printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); @@ -10791,7 +10821,7 @@ index 4ced92f..965eeed 100644 (void *) regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " "pte[%lx] error[%lx]\n", -@@ -2138,9 +2149,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) +@@ -2141,9 +2152,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); @@ -10803,7 +10833,7 @@ index 4ced92f..965eeed 100644 (void *) regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " "pte[%lx] error[%lx]\n", -@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) +@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) fp = (unsigned long)sf->fp + STACK_BIAS; } @@ -10819,7 +10849,7 @@ index 4ced92f..965eeed 100644 graph++; } } -@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw) +@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw) return (struct reg_window *) (fp + STACK_BIAS); } @@ -10828,7 +10858,7 @@ index 4ced92f..965eeed 100644 void die_if_kernel(char *str, struct pt_regs *regs) { static int die_counter; -@@ -2411,7 +2424,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) +@@ -2414,7 +2427,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) while (rw && count++ < 30 && kstack_valid(tp, (unsigned long) rw)) { @@ -10837,7 +10867,7 @@ index 4ced92f..965eeed 100644 (void *) rw->ins[7]); rw = kernel_stack_up(rw); -@@ -2424,8 +2437,10 @@ void die_if_kernel(char *str, struct pt_regs *regs) +@@ -2427,8 +2440,10 @@ void die_if_kernel(char *str, struct pt_regs 
*regs) } user_instruction_dump ((unsigned int __user *) regs->tpc); } @@ -11443,7 +11473,7 @@ index 59dbd46..1dd7f5e 100644 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c -index 4ced3fc..234f1e4 100644 +index 45a413e..fff0231 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -22,6 +22,9 @@ @@ -11932,7 +11962,7 @@ index 4ced3fc..234f1e4 100644 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); -@@ -352,6 +815,29 @@ retry: +@@ -355,6 +818,29 @@ retry: if (!vma) goto bad_area; @@ -12069,10 +12099,10 @@ index d329537..2c3746a 100644 pte_t *huge_pte_alloc(struct mm_struct *mm, diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c -index 9686224..dfbdb10 100644 +index 34506f2..0621e68 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c -@@ -188,9 +188,9 @@ unsigned long sparc64_kern_sec_context __read_mostly; +@@ -184,9 +184,9 @@ unsigned long sparc64_kern_sec_context __read_mostly; int num_kernel_image_mappings; #ifdef CONFIG_DEBUG_DCFLUSH @@ -12084,7 +12114,7 @@ index 9686224..dfbdb10 100644 #endif #endif -@@ -198,7 +198,7 @@ inline void flush_dcache_page_impl(struct page *page) +@@ -194,7 +194,7 @@ inline void flush_dcache_page_impl(struct page *page) { BUG_ON(tlb_type == hypervisor); #ifdef CONFIG_DEBUG_DCFLUSH @@ -12093,7 +12123,7 @@ index 9686224..dfbdb10 100644 #endif #ifdef DCACHE_ALIASING_POSSIBLE -@@ -470,10 +470,10 @@ void mmu_info(struct seq_file *m) +@@ -466,10 +466,10 @@ void mmu_info(struct seq_file *m) #ifdef CONFIG_DEBUG_DCFLUSH seq_printf(m, "DCPageFlushes\t: %d\n", @@ -33139,7 +33169,7 @@ index f35c66c..84b95ef 100644 if (vma == &gate_vma) return "[vsyscall]"; diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c -index 7b179b4..6bd17777 100644 +index 7b179b49..6bd17777 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) @@ -34108,7 +34138,7 @@ index 0149575..f746de8 100644 + pax_force_retaddr ret diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c -index af2d431..3cf24f0b 100644 +index af2d431..bc63cba 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -50,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) @@ -34400,7 +34430,7 @@ index af2d431..3cf24f0b 100644 fp->bpf_func = (void *)image; } out: -@@ -782,10 +887,9 @@ static void bpf_jit_free_deferred(struct work_struct *work) +@@ -782,10 +887,8 @@ static void bpf_jit_free_deferred(struct work_struct *work) { struct sk_filter *fp = container_of(work, struct sk_filter, work); unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; @@ -34408,7 +34438,6 @@ index af2d431..3cf24f0b 100644 - set_memory_rw(addr, header->pages); - module_free(NULL, header); -+ set_memory_rw(addr, 1); + module_free_exec(NULL, (void *)addr); kfree(fp); } @@ -35205,6 +35234,56 @@ index 1bbedc4..eb795b5 100644 } static unsigned long __init intel_mid_calibrate_tsc(void) +diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h +index 46aa25c..59a68ed 100644 +--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h ++++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h +@@ -10,10 +10,9 @@ + */ + + +-/* __attribute__((weak)) makes these declarations overridable */ + /* For every CPU addition a new get_<cpuname>_ops interface needs + * 
to be added. + */ +-extern void *get_penwell_ops(void) __attribute__((weak)); +-extern void *get_cloverview_ops(void) __attribute__((weak)); +-extern void *get_tangier_ops(void) __attribute__((weak)); ++extern const void *get_penwell_ops(void); ++extern const void *get_cloverview_ops(void); ++extern const void *get_tangier_ops(void); +diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c +index 23381d2..8ddc10e 100644 +--- a/arch/x86/platform/intel-mid/mfld.c ++++ b/arch/x86/platform/intel-mid/mfld.c +@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void) + pm_power_off = mfld_power_off; + } + +-void *get_penwell_ops(void) ++const void *get_penwell_ops(void) + { + return &penwell_ops; + } + +-void *get_cloverview_ops(void) ++const void *get_cloverview_ops(void) + { + return &penwell_ops; + } +diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c +index aaca917..66eadbc 100644 +--- a/arch/x86/platform/intel-mid/mrfl.c ++++ b/arch/x86/platform/intel-mid/mrfl.c +@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = { + .arch_setup = tangier_arch_setup, + }; + +-void *get_tangier_ops(void) ++const void *get_tangier_ops(void) + { + return &tangier_ops; + } diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c index d6ee929..3637cb5 100644 --- a/arch/x86/platform/olpc/olpc_dt.c @@ -36473,6 +36552,20 @@ index 7bdd61b..afec999 100644 static void cryptd_queue_worker(struct work_struct *work); +diff --git a/crypto/cts.c b/crypto/cts.c +index 042223f..133f087 100644 +--- a/crypto/cts.c ++++ b/crypto/cts.c +@@ -202,7 +202,8 @@ static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx, + /* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */ + memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn); + /* 6. Decrypt En to create Pn-1 */ +- memset(iv, 0, sizeof(iv)); ++ memzero_explicit(iv, sizeof(iv)); ++ + sg_set_buf(&sgsrc[0], s + bsize, bsize); + sg_set_buf(&sgdst[0], d, bsize); + err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize); diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 309d345..1632720 100644 --- a/crypto/pcrypt.c @@ -36486,6 +36579,118 @@ index 309d345..1632720 100644 if (!ret) kobject_uevent(&pinst->kobj, KOBJ_ADD); +diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c +index 4279480..7bb0474 100644 +--- a/crypto/sha1_generic.c ++++ b/crypto/sha1_generic.c +@@ -64,7 +64,7 @@ int crypto_sha1_update(struct shash_desc *desc, const u8 *data, + src = data + done; + } while (done + SHA1_BLOCK_SIZE <= len); + +- memset(temp, 0, sizeof(temp)); ++ memzero_explicit(temp, sizeof(temp)); + partial = 0; + } + memcpy(sctx->buffer + partial, src, len - done); +diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c +index 5433667..32c5e5e 100644 +--- a/crypto/sha256_generic.c ++++ b/crypto/sha256_generic.c +@@ -210,10 +210,9 @@ static void sha256_transform(u32 *state, const u8 *input) + + /* clear any sensitive info... 
*/ + a = b = c = d = e = f = g = h = t1 = t2 = 0; +- memset(W, 0, 64 * sizeof(u32)); ++ memzero_explicit(W, 64 * sizeof(u32)); + } + +- + static int sha224_init(struct shash_desc *desc) + { + struct sha256_state *sctx = shash_desc_ctx(desc); +@@ -316,7 +315,7 @@ static int sha224_final(struct shash_desc *desc, u8 *hash) + sha256_final(desc, D); + + memcpy(hash, D, SHA224_DIGEST_SIZE); +- memset(D, 0, SHA256_DIGEST_SIZE); ++ memzero_explicit(D, SHA256_DIGEST_SIZE); + + return 0; + } +diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c +index 6ed124f..04d295a 100644 +--- a/crypto/sha512_generic.c ++++ b/crypto/sha512_generic.c +@@ -238,7 +238,7 @@ static int sha384_final(struct shash_desc *desc, u8 *hash) + sha512_final(desc, D); + + memcpy(hash, D, 48); +- memset(D, 0, 64); ++ memzero_explicit(D, 64); + + return 0; + } +diff --git a/crypto/tgr192.c b/crypto/tgr192.c +index 8740355..3c7af0d 100644 +--- a/crypto/tgr192.c ++++ b/crypto/tgr192.c +@@ -612,7 +612,7 @@ static int tgr160_final(struct shash_desc *desc, u8 * out) + + tgr192_final(desc, D); + memcpy(out, D, TGR160_DIGEST_SIZE); +- memset(D, 0, TGR192_DIGEST_SIZE); ++ memzero_explicit(D, TGR192_DIGEST_SIZE); + + return 0; + } +@@ -623,7 +623,7 @@ static int tgr128_final(struct shash_desc *desc, u8 * out) + + tgr192_final(desc, D); + memcpy(out, D, TGR128_DIGEST_SIZE); +- memset(D, 0, TGR192_DIGEST_SIZE); ++ memzero_explicit(D, TGR192_DIGEST_SIZE); + + return 0; + } +diff --git a/crypto/vmac.c b/crypto/vmac.c +index 2eb11a3..d84c24b 100644 +--- a/crypto/vmac.c ++++ b/crypto/vmac.c +@@ -613,7 +613,7 @@ static int vmac_final(struct shash_desc *pdesc, u8 *out) + } + mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx); + memcpy(out, &mac, sizeof(vmac_t)); +- memset(&mac, 0, sizeof(vmac_t)); ++ memzero_explicit(&mac, sizeof(vmac_t)); + memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); + ctx->partial_size = 0; + return 0; +diff --git a/crypto/wp512.c b/crypto/wp512.c +index 180f1d6..ec64e77 100644 +--- a/crypto/wp512.c ++++ b/crypto/wp512.c +@@ -1102,8 +1102,8 @@ static int wp384_final(struct shash_desc *desc, u8 *out) + u8 D[64]; + + wp512_final(desc, D); +- memcpy (out, D, WP384_DIGEST_SIZE); +- memset (D, 0, WP512_DIGEST_SIZE); ++ memcpy(out, D, WP384_DIGEST_SIZE); ++ memzero_explicit(D, WP512_DIGEST_SIZE); + + return 0; + } +@@ -1113,8 +1113,8 @@ static int wp256_final(struct shash_desc *desc, u8 *out) + u8 D[64]; + + wp512_final(desc, D); +- memcpy (out, D, WP256_DIGEST_SIZE); +- memset (D, 0, WP512_DIGEST_SIZE); ++ memcpy(out, D, WP256_DIGEST_SIZE); ++ memzero_explicit(D, WP512_DIGEST_SIZE); + + return 0; + } diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 15dddc1..b61cf0c 100644 --- a/drivers/acpi/acpica/hwxfsleep.c @@ -39176,7 +39381,7 @@ index 8320abd..ec48108 100644 if (cmd != SIOCWANDEV) diff --git a/drivers/char/random.c b/drivers/char/random.c -index 429b75b..de805d0 100644 +index 429b75b..58488cc 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -284,9 +284,6 @@ @@ -39222,6 +39427,35 @@ index 429b75b..de805d0 100644 unsigned int add = ((pool_size - entropy_count)*anfrac*3) >> s; +@@ -1063,8 +1060,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out) + * pool while mixing, and hash one final time. 
+ */ + sha_transform(hash.w, extract, workspace); +- memset(extract, 0, sizeof(extract)); +- memset(workspace, 0, sizeof(workspace)); ++ memzero_explicit(extract, sizeof(extract)); ++ memzero_explicit(workspace, sizeof(workspace)); + + /* + * In case the hash function has some recognizable output +@@ -1076,7 +1073,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out) + hash.w[2] ^= rol32(hash.w[2], 16); + + memcpy(out, &hash, EXTRACT_SIZE); +- memset(&hash, 0, sizeof(hash)); ++ memzero_explicit(&hash, sizeof(hash)); + } + + static ssize_t extract_entropy(struct entropy_store *r, void *buf, +@@ -1124,7 +1121,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, + } + + /* Wipe data just returned from memory */ +- memset(tmp, 0, sizeof(tmp)); ++ memzero_explicit(tmp, sizeof(tmp)); + + return ret; + } @@ -1151,7 +1148,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, extract_buf(r, tmp); @@ -39231,6 +39465,15 @@ index 429b75b..de805d0 100644 ret = -EFAULT; break; } +@@ -1162,7 +1159,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, + } + + /* Wipe data just returned from memory */ +- memset(tmp, 0, sizeof(tmp)); ++ memzero_explicit(tmp, sizeof(tmp)); + + return ret; + } @@ -1507,7 +1504,7 @@ EXPORT_SYMBOL(generate_random_uuid); #include <linux/sysctl.h> @@ -42048,12 +42291,12 @@ index cedc6da..2c3da2a 100644 if (atomic_read(&uhid->report_done)) goto unlock; diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c -index 69ea36f..8dbf4bb 100644 +index e99e71a..e4ae549 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c -@@ -364,8 +364,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, +@@ -365,8 +365,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, + unsigned long flags; int ret = 0; - int t; - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle); - atomic_inc(&vmbus_connection.next_gpadl_handle); @@ -49054,7 +49297,7 @@ index fb02fc2..83dc2c3 100644 kfree(msi_dev_attr); ++count; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c -index 276ef9c..1d33a36 100644 +index 39a207a..d1ec78a 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1112,7 +1112,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) @@ -50822,7 +51065,7 @@ index 1f42662..bf9836c 100644 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool); extern void qla2x00_init_host_attr(scsi_qla_host_t *); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c -index 83cb612..9b7b08c 100644 +index 23c1b0c..e8035ca 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1491,8 +1491,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha) @@ -58614,10 +58857,10 @@ index f70119f..ab5894d 100644 spin_lock_init(&delayed_root->lock); init_waitqueue_head(&delayed_root->wait); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c -index a6d8efa..2f062cf 100644 +index 0b72006..264c7de 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c -@@ -3491,9 +3491,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) +@@ -3494,9 +3494,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) for (i = 0; i < num_types; i++) { struct btrfs_space_info *tmp; @@ -58630,7 +58873,7 @@ index a6d8efa..2f062cf 100644 info = NULL; rcu_read_lock(); list_for_each_entry_rcu(tmp, &root->fs_info->space_info, -@@ -3515,10 +3518,7 @@ 
static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) +@@ -3518,10 +3521,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) memcpy(dest, &space, sizeof(space)); dest++; space_args.total_spaces++; @@ -59723,7 +59966,7 @@ index a93f7e6..d58bcbe 100644 return 0; while (nr) { diff --git a/fs/dcache.c b/fs/dcache.c -index 58d57da..f91b141 100644 +index 58d57da..a3f889f 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -250,7 +250,7 @@ static void __d_free(struct rcu_head *head) @@ -59800,7 +60043,15 @@ index 58d57da..f91b141 100644 d_lru_isolate(dentry); spin_unlock(&dentry->d_lock); return LRU_REMOVED; -@@ -1268,7 +1268,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) +@@ -1159,6 +1159,7 @@ out_unlock: + return; + + rename_retry: ++ done_seqretry(&rename_lock, seq); + if (!retry) + return; + seq = 1; +@@ -1268,7 +1269,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) * loop in shrink_dcache_parent() might not make any progress * and loop forever. */ @@ -59809,7 +60060,7 @@ index 58d57da..f91b141 100644 dentry_lru_del(dentry); } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { /* -@@ -1322,11 +1322,11 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry) +@@ -1322,11 +1323,11 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry) struct select_data *data = _data; enum d_walk_ret ret = D_WALK_CONTINUE; @@ -59823,7 +60074,7 @@ index 58d57da..f91b141 100644 goto out; printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%s}" -@@ -1336,7 +1336,7 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry) +@@ -1336,7 +1337,7 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry) dentry->d_inode ? 
dentry->d_inode->i_ino : 0UL, dentry->d_name.name, @@ -59832,7 +60083,7 @@ index 58d57da..f91b141 100644 dentry->d_sb->s_type->name, dentry->d_sb->s_id); BUG(); -@@ -1494,7 +1494,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) +@@ -1494,7 +1495,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) */ dentry->d_iname[DNAME_INLINE_LEN-1] = 0; if (name->len > DNAME_INLINE_LEN-1) { @@ -59841,7 +60092,7 @@ index 58d57da..f91b141 100644 if (!dname) { kmem_cache_free(dentry_cache, dentry); return NULL; -@@ -1512,7 +1512,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) +@@ -1512,7 +1513,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) smp_wmb(); dentry->d_name.name = dname; @@ -59850,7 +60101,7 @@ index 58d57da..f91b141 100644 dentry->d_flags = 0; spin_lock_init(&dentry->d_lock); seqcount_init(&dentry->d_seq); -@@ -2275,7 +2275,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) +@@ -2275,7 +2276,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) goto next; } @@ -59859,7 +60110,7 @@ index 58d57da..f91b141 100644 found = dentry; spin_unlock(&dentry->d_lock); break; -@@ -2374,7 +2374,7 @@ again: +@@ -2374,7 +2375,7 @@ again: spin_lock(&dentry->d_lock); inode = dentry->d_inode; isdir = S_ISDIR(inode->i_mode); @@ -59868,7 +60119,7 @@ index 58d57da..f91b141 100644 if (!spin_trylock(&inode->i_lock)) { spin_unlock(&dentry->d_lock); cpu_relax(); -@@ -3313,7 +3313,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry) +@@ -3313,7 +3314,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry) if (!(dentry->d_flags & DCACHE_GENOCIDE)) { dentry->d_flags |= DCACHE_GENOCIDE; @@ -59877,7 +60128,7 @@ index 58d57da..f91b141 100644 } } return D_WALK_CONTINUE; -@@ -3429,7 +3429,8 @@ void __init vfs_caches_init(unsigned long mempages) +@@ -3429,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages) mempages -= reserve; names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, @@ -59904,7 +60155,7 @@ index 1576195..49a19ae 100644 } EXPORT_SYMBOL_GPL(debugfs_create_dir); diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c -index b167ca4..a224e19 100644 +index a85ceb7..5097313b 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz) @@ -63648,10 +63899,10 @@ index dd2f2c5..27e6c48 100644 out: return len; diff --git a/fs/namespace.c b/fs/namespace.c -index 75536db..7ec079e 100644 +index c7d4a0a..93207ab 100644 --- a/fs/namespace.c +++ b/fs/namespace.c -@@ -1369,6 +1369,9 @@ static int do_umount(struct mount *mnt, int flags) +@@ -1371,6 +1371,9 @@ static int do_umount(struct mount *mnt, int flags) if (!(sb->s_flags & MS_RDONLY)) retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); up_write(&sb->s_umount); @@ -63661,7 +63912,7 @@ index 75536db..7ec079e 100644 return retval; } -@@ -1391,6 +1394,9 @@ static int do_umount(struct mount *mnt, int flags) +@@ -1393,6 +1396,9 @@ static int do_umount(struct mount *mnt, int flags) } unlock_mount_hash(); namespace_unlock(); @@ -63671,7 +63922,7 @@ index 75536db..7ec079e 100644 return retval; } -@@ -1410,7 +1416,7 @@ static inline bool may_mount(void) +@@ -1412,7 +1418,7 @@ static inline bool may_mount(void) * unixes. 
Our API is identical to OSF/1 to avoid making a mess of AMD */ @@ -63680,7 +63931,7 @@ index 75536db..7ec079e 100644 { struct path path; struct mount *mnt; -@@ -1452,7 +1458,7 @@ out: +@@ -1454,7 +1460,7 @@ out: /* * The 2.0 compatible umount. No flags. */ @@ -63689,7 +63940,7 @@ index 75536db..7ec079e 100644 { return sys_umount(name, 0); } -@@ -2501,6 +2507,16 @@ long do_mount(const char *dev_name, const char *dir_name, +@@ -2503,6 +2509,16 @@ long do_mount(const char *dev_name, const char *dir_name, MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | MS_STRICTATIME); @@ -63706,7 +63957,7 @@ index 75536db..7ec079e 100644 if (flags & MS_REMOUNT) retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, data_page); -@@ -2515,6 +2531,9 @@ long do_mount(const char *dev_name, const char *dir_name, +@@ -2517,6 +2533,9 @@ long do_mount(const char *dev_name, const char *dir_name, dev_name, data_page); dput_out: path_put(&path); @@ -63716,7 +63967,7 @@ index 75536db..7ec079e 100644 return retval; } -@@ -2532,7 +2551,7 @@ static void free_mnt_ns(struct mnt_namespace *ns) +@@ -2534,7 +2553,7 @@ static void free_mnt_ns(struct mnt_namespace *ns) * number incrementing at 10Ghz will take 12,427 years to wrap which * is effectively never, so we can ignore the possibility. */ @@ -63725,7 +63976,7 @@ index 75536db..7ec079e 100644 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) { -@@ -2547,7 +2566,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) +@@ -2549,7 +2568,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) kfree(new_ns); return ERR_PTR(ret); } @@ -63734,7 +63985,7 @@ index 75536db..7ec079e 100644 atomic_set(&new_ns->count, 1); new_ns->root = NULL; INIT_LIST_HEAD(&new_ns->list); -@@ -2557,7 +2576,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) +@@ -2559,7 +2578,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) return new_ns; } @@ -63743,7 +63994,7 @@ index 75536db..7ec079e 100644 struct user_namespace *user_ns, struct fs_struct *new_fs) { struct mnt_namespace *new_ns; -@@ -2678,8 +2697,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) +@@ -2680,8 +2699,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) } EXPORT_SYMBOL(mount_subtree); @@ -63754,7 +64005,7 @@ index 75536db..7ec079e 100644 { int ret; char *kernel_type; -@@ -2792,6 +2811,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, +@@ -2794,6 +2813,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, if (error) goto out2; @@ -63766,7 +64017,7 @@ index 75536db..7ec079e 100644 get_fs_root(current->fs, &root); old_mp = lock_mount(&old); error = PTR_ERR(old_mp); -@@ -2829,6 +2853,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, +@@ -2831,6 +2855,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, /* make sure we can reach put_old from new_root */ if (!is_path_reachable(old_mnt, old.dentry, &new)) goto out4; @@ -63776,7 +64027,7 @@ index 75536db..7ec079e 100644 root_mp->m_count++; /* pin it so it won't go away */ lock_mount_hash(); detach_mnt(new_mnt, &parent_path); -@@ -3060,7 +3087,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns) +@@ -3062,7 +3089,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns) !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) return -EPERM; @@ -64019,7 +64270,7 @@ index a80a741..7b96e1b 100644 } diff --git a/fs/notify/fanotify/fanotify_user.c 
b/fs/notify/fanotify/fanotify_user.c -index 287a22c..4e56e4e 100644 +index de6323e..4931489 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, @@ -64151,6 +64402,19 @@ index 0440134..d52c93a 100644 bail: if (handle) +diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c +index feed025f..cee9402 100644 +--- a/fs/ocfs2/namei.c ++++ b/fs/ocfs2/namei.c +@@ -158,7 +158,7 @@ bail_add: + * NOTE: This dentry already has ->d_op set from + * ocfs2_get_parent() and ocfs2_get_dentry() + */ +- if (ret) ++ if (!IS_ERR_OR_NULL(ret)) + dentry = ret; + + status = ocfs2_dentry_attach_lock(dentry, inode, diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 553f53c..aaf5133 100644 --- a/fs/ocfs2/ocfs2.h @@ -64708,7 +64972,7 @@ index 2183fcf..3c32a98 100644 help Various /proc files exist to monitor process memory utilization: diff --git a/fs/proc/array.c b/fs/proc/array.c -index baf3464..6873520 100644 +index baf3464..5b394ec 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -60,6 +60,7 @@ @@ -64846,14 +65110,22 @@ index baf3464..6873520 100644 if (mm) { size = task_statm(mm, &shared, &text, &data, &resident); mmput(mm); -@@ -581,6 +649,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, +@@ -581,6 +649,21 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, return 0; } +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR +int proc_pid_ipaddr(struct task_struct *task, char *buffer) +{ -+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip); ++ unsigned long flags; ++ u32 curr_ip = 0; ++ ++ if (lock_task_sighand(task, &flags)) { ++ curr_ip = task->signal->curr_ip; ++ unlock_task_sighand(task, &flags); ++ } ++ ++ return sprintf(buffer, "%pI4\n", &curr_ip); +} +#endif + @@ -77402,7 +77674,7 @@ index 0000000..3860c7e +} diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c new file mode 100644 -index 0000000..c0aef3a +index 0000000..e3650b6 --- /dev/null +++ b/grsecurity/grsec_sock.c @@ -0,0 +1,244 @@ @@ -77529,10 +77801,10 @@ index 0000000..c0aef3a + +#endif + -+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet) ++void gr_update_task_in_ip_table(const struct inet_sock *inet) +{ +#ifdef CONFIG_GRKERNSEC -+ struct signal_struct *sig = task->signal; ++ struct signal_struct *sig = current->signal; + struct conn_table_entry *newent; + + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC); @@ -79383,6 +79655,19 @@ index 939533d..cf0a57c 100644 /** * struct clk_init_data - holds init data that's common to all clocks and is +diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h +index 67301a4..879065d 100644 +--- a/include/linux/clocksource.h ++++ b/include/linux/clocksource.h +@@ -289,7 +289,7 @@ extern struct clocksource* clocksource_get_next(void); + extern void clocksource_change_rating(struct clocksource *cs, int rating); + extern void clocksource_suspend(void); + extern void clocksource_resume(void); +-extern struct clocksource * __init __weak clocksource_default_clock(void); ++extern struct clocksource * __init clocksource_default_clock(void); + extern void clocksource_mark_unstable(struct clocksource *cs); + + extern u64 diff --git a/include/linux/compat.h b/include/linux/compat.h index 3f448c6..df3ce1d 100644 --- a/include/linux/compat.h @@ -79717,6 +80002,32 @@ index d08e4d2..95fad61 100644 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); /** +diff --git 
a/include/linux/crash_dump.h b/include/linux/crash_dump.h +index 7032518..60023e5 100644 +--- a/include/linux/crash_dump.h ++++ b/include/linux/crash_dump.h +@@ -14,14 +14,13 @@ + extern unsigned long long elfcorehdr_addr; + extern unsigned long long elfcorehdr_size; + +-extern int __weak elfcorehdr_alloc(unsigned long long *addr, +- unsigned long long *size); +-extern void __weak elfcorehdr_free(unsigned long long addr); +-extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos); +-extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); +-extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, +- unsigned long from, unsigned long pfn, +- unsigned long size, pgprot_t prot); ++extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size); ++extern void elfcorehdr_free(unsigned long long addr); ++extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos); ++extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); ++extern int remap_oldmem_pfn_range(struct vm_area_struct *vma, ++ unsigned long from, unsigned long pfn, ++ unsigned long size, pgprot_t prot); + + extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, + unsigned long, int); diff --git a/include/linux/cred.h b/include/linux/cred.h index 04421e8..a85afd4 100644 --- a/include/linux/cred.h @@ -81917,7 +82228,7 @@ index a74c3a8..28d3f21 100644 extern struct key_type key_type_keyring; diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h -index 6b06d37..c134867 100644 +index 6b06d37..19f605f 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -52,7 +52,7 @@ extern int kgdb_connected; @@ -81938,7 +82249,7 @@ index 6b06d37..c134867 100644 /** * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB. 
-@@ -279,7 +279,7 @@ struct kgdb_io { +@@ -279,11 +279,11 @@ struct kgdb_io { void (*pre_exception) (void); void (*post_exception) (void); int is_console; @@ -81947,6 +82258,11 @@ index 6b06d37..c134867 100644 extern struct kgdb_arch arch_kgdb_ops; +-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs); ++extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs); + + #ifdef CONFIG_SERIAL_KGDB_NMI + extern int kgdb_register_nmi_console(void); diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 0555cc6..40116ce 100644 --- a/include/linux/kmod.h @@ -82196,6 +82512,19 @@ index c45c089..298841c 100644 { u32 remainder; return div_u64_rem(dividend, divisor, &remainder); +diff --git a/include/linux/memory.h b/include/linux/memory.h +index bb7384e..8b8d8d1 100644 +--- a/include/linux/memory.h ++++ b/include/linux/memory.h +@@ -35,7 +35,7 @@ struct memory_block { + }; + + int arch_get_memory_phys_device(unsigned long start_pfn); +-unsigned long __weak memory_block_size_bytes(void); ++unsigned long memory_block_size_bytes(void); + + /* These states are exposed to userspace as text strings in sysfs */ + #define MEM_ONLINE (1<<0) /* exposed to userspace */ diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5bba088..7ad4ae7 100644 --- a/include/linux/mempolicy.h @@ -83646,7 +83975,7 @@ index a964f72..b475afb 100644 } diff --git a/include/linux/sched.h b/include/linux/sched.h -index d7ca410..8b39a0c 100644 +index 218b058..1ce7ad0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -133,6 +133,7 @@ struct fs_struct; @@ -83892,7 +84221,7 @@ index d7ca410..8b39a0c 100644 { return tsk->pid; } -@@ -2013,6 +2127,25 @@ extern u64 sched_clock_cpu(int cpu); +@@ -2015,6 +2129,25 @@ extern u64 sched_clock_cpu(int cpu); extern void sched_clock_init(void); @@ -83918,7 +84247,7 @@ index d7ca410..8b39a0c 100644 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK static inline void sched_clock_tick(void) { -@@ -2137,7 +2270,9 @@ void yield(void); +@@ -2139,7 +2272,9 @@ void yield(void); extern struct exec_domain default_exec_domain; union thread_union { @@ -83928,7 +84257,7 @@ index d7ca410..8b39a0c 100644 unsigned long stack[THREAD_SIZE/sizeof(long)]; }; -@@ -2170,6 +2305,7 @@ extern struct pid_namespace init_pid_ns; +@@ -2172,6 +2307,7 @@ extern struct pid_namespace init_pid_ns; */ extern struct task_struct *find_task_by_vpid(pid_t nr); @@ -83936,7 +84265,7 @@ index d7ca410..8b39a0c 100644 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); -@@ -2332,7 +2468,7 @@ extern void __cleanup_sighand(struct sighand_struct *); +@@ -2334,7 +2470,7 @@ extern void __cleanup_sighand(struct sighand_struct *); extern void exit_itimers(struct signal_struct *); extern void flush_itimer_signals(void); @@ -83945,7 +84274,7 @@ index d7ca410..8b39a0c 100644 extern int allow_signal(int); extern int disallow_signal(int); -@@ -2533,9 +2669,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) +@@ -2535,9 +2671,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) #endif @@ -84304,6 +84633,29 @@ index 680f9a3..f13aeb0 100644 __SONET_ITEMS #undef __HANDLE_ITEM }; +diff --git a/include/linux/string.h b/include/linux/string.h +index ac889c5..0ed878d 100644 +--- a/include/linux/string.h ++++ b/include/linux/string.h +@@ -129,7 +129,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) 
__printf(3, 4); + #endif + + extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, +- const void *from, size_t available); ++ const void *from, size_t available); + + /** + * strstarts - does @str start with @prefix? +@@ -141,7 +141,8 @@ static inline bool strstarts(const char *str, const char *prefix) + return strncmp(str, prefix, strlen(prefix)) == 0; + } + +-extern size_t memweight(const void *ptr, size_t bytes); ++size_t memweight(const void *ptr, size_t bytes); ++void memzero_explicit(void *s, size_t count); + + /** + * kbasename - return the last part of a pathname. diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h index 07d8e53..dc934c9 100644 --- a/include/linux/sunrpc/addr.h @@ -88457,7 +88809,7 @@ index e2c6853..9a6397e 100644 else new_fs = fs; diff --git a/kernel/futex.c b/kernel/futex.c -index 0b0dc02..5f3eb62 100644 +index fda2950..5f3eb62 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -54,6 +54,7 @@ @@ -88486,16 +88838,7 @@ index 0b0dc02..5f3eb62 100644 static const struct futex_q futex_q_init = { /* list gets initialized in queue_me()*/ -@@ -329,6 +330,8 @@ static void get_futex_key_refs(union futex_key *key) - case FUT_OFF_MMSHARED: - futex_get_mm(key); /* implies MB (B) */ - break; -+ default: -+ smp_mb(); /* explicit MB (B) */ - } - } - -@@ -380,6 +383,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) +@@ -382,6 +383,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) struct page *page, *page_head; int err, ro = 0; @@ -88507,7 +88850,7 @@ index 0b0dc02..5f3eb62 100644 /* * The futex address must be "naturally" aligned. */ -@@ -579,7 +587,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr, +@@ -581,7 +587,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr, static int get_futex_value_locked(u32 *dest, u32 __user *from) { @@ -88516,7 +88859,7 @@ index 0b0dc02..5f3eb62 100644 pagefault_disable(); ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); -@@ -3020,6 +3028,7 @@ static void __init futex_detect_cmpxchg(void) +@@ -3022,6 +3028,7 @@ static void __init futex_detect_cmpxchg(void) { #ifndef CONFIG_HAVE_FUTEX_CMPXCHG u32 curval; @@ -88524,7 +88867,7 @@ index 0b0dc02..5f3eb62 100644 /* * This will fail and we want it. Some arch implementations do -@@ -3031,8 +3040,11 @@ static void __init futex_detect_cmpxchg(void) +@@ -3033,8 +3040,11 @@ static void __init futex_detect_cmpxchg(void) * implementation, the non-functional ones will return * -ENOSYS. 
*/ @@ -93330,10 +93673,32 @@ index 114d1be..ab0350c 100644 (val << avg->factor)) >> avg->weight : (val << avg->factor); diff --git a/lib/bitmap.c b/lib/bitmap.c -index 06f7e4f..f3cf2b0 100644 +index 06f7e4f..9078e42 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c -@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, +@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst, + lower = src[off + k]; + if (left && off + k == lim - 1) + lower &= mask; +- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem; ++ dst[k] = lower >> rem; ++ if (rem) ++ dst[k] |= upper << (BITS_PER_LONG - rem); + if (left && k == lim - 1) + dst[k] &= mask; + } +@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst, + upper = src[k]; + if (left && k == lim - 1) + upper &= (1UL << left) - 1; +- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem; ++ dst[k + off] = upper << rem; ++ if (rem) ++ dst[k + off] |= lower >> (BITS_PER_LONG - rem); + if (left && k + off == lim - 1) + dst[k + off] &= (1UL << left) - 1; + } +@@ -422,7 +426,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, { int c, old_c, totaldigits, ndigits, nchunks, nbits; u32 chunk; @@ -93342,7 +93707,7 @@ index 06f7e4f..f3cf2b0 100644 bitmap_zero(maskp, nmaskbits); -@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf, +@@ -507,7 +511,7 @@ int bitmap_parse_user(const char __user *ubuf, { if (!access_ok(VERIFY_READ, ubuf, ulen)) return -EFAULT; @@ -93351,7 +93716,7 @@ index 06f7e4f..f3cf2b0 100644 ulen, 1, maskp, nmaskbits); } -@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, +@@ -598,7 +602,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, { unsigned a, b; int c, old_c, totaldigits; @@ -93360,7 +93725,7 @@ index 06f7e4f..f3cf2b0 100644 int exp_digit, in_range; totaldigits = c = 0; -@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf, +@@ -698,7 +702,7 @@ int bitmap_parselist_user(const char __user *ubuf, { if (!access_ok(VERIFY_READ, ubuf, ulen)) return -EFAULT; @@ -93941,10 +94306,33 @@ index 0922579..9d7adb9 100644 #endif } diff --git a/lib/string.c b/lib/string.c -index e5878de..315fad2 100644 +index e5878de..64941b2 100644 --- a/lib/string.c +++ b/lib/string.c -@@ -789,9 +789,9 @@ void *memchr_inv(const void *start, int c, size_t bytes) +@@ -586,6 +586,22 @@ void *memset(void *s, int c, size_t count) + EXPORT_SYMBOL(memset); + #endif + ++/** ++ * memzero_explicit - Fill a region of memory (e.g. sensitive ++ * keying data) with 0s. ++ * @s: Pointer to the start of the area. ++ * @count: The size of the area. ++ * ++ * memzero_explicit() doesn't need an arch-specific version as ++ * it just invokes the one of memset() implicitly. 
++ */ ++void memzero_explicit(void *s, size_t count) ++{ ++ memset(s, 0, count); ++ OPTIMIZER_HIDE_VAR(s); ++} ++EXPORT_SYMBOL(memzero_explicit); ++ + #ifndef __HAVE_ARCH_MEMCPY + /** + * memcpy - Copy one area of memory to another +@@ -789,9 +805,9 @@ void *memchr_inv(const void *start, int c, size_t bytes) return check_bytes8(start, value, bytes); value64 = value; @@ -94697,7 +95085,7 @@ index 33365e9..2234ef9 100644 } unset_migratetype_isolate(page, MIGRATE_MOVABLE); diff --git a/mm/memory.c b/mm/memory.c -index 492e36f..3771c0a 100644 +index 492e36f..b153792 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -403,6 +403,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, @@ -94734,7 +95122,32 @@ index 492e36f..3771c0a 100644 } /* -@@ -1636,12 +1642,6 @@ no_page_table: +@@ -679,10 +685,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, + * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y + */ + if (vma->vm_ops) +- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n", ++ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n", + vma->vm_ops->fault); + if (vma->vm_file) +- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n", ++ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n", + vma->vm_file->f_op->mmap); + dump_stack(); + add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); +@@ -1137,8 +1143,10 @@ again: + if (unlikely(page_mapcount(page) < 0)) + print_bad_pte(vma, addr, ptent, page); + force_flush = !__tlb_remove_page(tlb, page); +- if (force_flush) ++ if (force_flush) { ++ addr += PAGE_SIZE; + break; ++ } + continue; + } + /* +@@ -1636,12 +1644,6 @@ no_page_table: return page; } @@ -94747,7 +95160,7 @@ index 492e36f..3771c0a 100644 /** * __get_user_pages() - pin user pages in memory * @tsk: task_struct of target task -@@ -1728,10 +1728,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, +@@ -1728,10 +1730,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, i = 0; @@ -94760,7 +95173,7 @@ index 492e36f..3771c0a 100644 if (!vma && in_gate_area(mm, start)) { unsigned long pg = start & PAGE_MASK; pgd_t *pgd; -@@ -1780,7 +1780,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, +@@ -1780,7 +1782,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, goto next_page; } @@ -94769,7 +95182,7 @@ index 492e36f..3771c0a 100644 (vma->vm_flags & (VM_IO | VM_PFNMAP)) || !(vm_flags & vma->vm_flags)) return i ? 
: -EFAULT; -@@ -1809,11 +1809,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, +@@ -1809,11 +1811,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, int ret; unsigned int fault_flags = 0; @@ -94781,7 +95194,7 @@ index 492e36f..3771c0a 100644 if (foll_flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (nonblocking) -@@ -1893,7 +1888,7 @@ next_page: +@@ -1893,7 +1890,7 @@ next_page: start += page_increm * PAGE_SIZE; nr_pages -= page_increm; } while (nr_pages && start < vma->vm_end); @@ -94790,7 +95203,7 @@ index 492e36f..3771c0a 100644 return i; } EXPORT_SYMBOL(__get_user_pages); -@@ -2105,6 +2100,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr, +@@ -2105,6 +2102,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr, page_add_file_rmap(page); set_pte_at(mm, addr, pte, mk_pte(page, prot)); @@ -94801,7 +95214,7 @@ index 492e36f..3771c0a 100644 retval = 0; pte_unmap_unlock(pte, ptl); return retval; -@@ -2149,9 +2148,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, +@@ -2149,9 +2150,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, if (!page_count(page)) return -EINVAL; if (!(vma->vm_flags & VM_MIXEDMAP)) { @@ -94823,7 +95236,7 @@ index 492e36f..3771c0a 100644 } return insert_page(vma, addr, page, vma->vm_page_prot); } -@@ -2234,6 +2245,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, +@@ -2234,6 +2247,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); @@ -94831,7 +95244,7 @@ index 492e36f..3771c0a 100644 if (addr < vma->vm_start || addr >= vma->vm_end) return -EFAULT; -@@ -2481,7 +2493,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, +@@ -2481,7 +2495,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, BUG_ON(pud_huge(*pud)); @@ -94842,7 +95255,7 @@ index 492e36f..3771c0a 100644 if (!pmd) return -ENOMEM; do { -@@ -2501,7 +2515,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, +@@ -2501,7 +2517,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, unsigned long next; int err; @@ -94853,7 +95266,7 @@ index 492e36f..3771c0a 100644 if (!pud) return -ENOMEM; do { -@@ -2591,6 +2607,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo +@@ -2591,6 +2609,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo copy_user_highpage(dst, src, va, vma); } @@ -95040,7 +95453,7 @@ index 492e36f..3771c0a 100644 /* * This routine handles present pages, when users try to write * to a shared page. It is done by copying the page to a new address -@@ -2815,6 +3011,12 @@ gotten: +@@ -2815,6 +3013,12 @@ gotten: */ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (likely(pte_same(*page_table, orig_pte))) { @@ -95053,7 +95466,7 @@ index 492e36f..3771c0a 100644 if (old_page) { if (!PageAnon(old_page)) { dec_mm_counter_fast(mm, MM_FILEPAGES); -@@ -2866,6 +3068,10 @@ gotten: +@@ -2866,6 +3070,10 @@ gotten: page_remove_rmap(old_page); } @@ -95064,7 +95477,7 @@ index 492e36f..3771c0a 100644 /* Free the old page.. 
*/ new_page = old_page; ret |= VM_FAULT_WRITE; -@@ -3143,6 +3349,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3143,6 +3351,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, swap_free(entry); if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) try_to_free_swap(page); @@ -95076,7 +95489,7 @@ index 492e36f..3771c0a 100644 unlock_page(page); if (page != swapcache) { /* -@@ -3166,6 +3377,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3166,6 +3379,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, page_table); @@ -95088,7 +95501,7 @@ index 492e36f..3771c0a 100644 unlock: pte_unmap_unlock(page_table, ptl); out: -@@ -3185,40 +3401,6 @@ out_release: +@@ -3185,40 +3403,6 @@ out_release: } /* @@ -95129,7 +95542,7 @@ index 492e36f..3771c0a 100644 * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_sem still held, but pte unmapped and unlocked. -@@ -3227,27 +3409,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3227,27 +3411,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags) { @@ -95162,7 +95575,7 @@ index 492e36f..3771c0a 100644 if (unlikely(anon_vma_prepare(vma))) goto oom; page = alloc_zeroed_user_highpage_movable(vma, address); -@@ -3271,6 +3449,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3271,6 +3451,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, if (!pte_none(*page_table)) goto release; @@ -95174,7 +95587,7 @@ index 492e36f..3771c0a 100644 inc_mm_counter_fast(mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, address); setpte: -@@ -3278,6 +3461,12 @@ setpte: +@@ -3278,6 +3463,12 @@ setpte: /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, page_table); @@ -95187,7 +95600,7 @@ index 492e36f..3771c0a 100644 unlock: pte_unmap_unlock(page_table, ptl); return 0; -@@ -3422,6 +3611,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3422,6 +3613,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, */ /* Only go through if we didn't race with anybody else... 
*/ if (likely(pte_same(*page_table, orig_pte))) { @@ -95200,7 +95613,7 @@ index 492e36f..3771c0a 100644 flush_icache_page(vma, page); entry = mk_pte(page, vma->vm_page_prot); if (flags & FAULT_FLAG_WRITE) -@@ -3443,6 +3638,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3443,6 +3640,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, /* no need to invalidate: a not-present page won't be cached */ update_mmu_cache(vma, address, page_table); @@ -95215,7 +95628,7 @@ index 492e36f..3771c0a 100644 } else { if (cow_page) mem_cgroup_uncharge_page(cow_page); -@@ -3690,6 +3893,12 @@ static int handle_pte_fault(struct mm_struct *mm, +@@ -3690,6 +3895,12 @@ static int handle_pte_fault(struct mm_struct *mm, if (flags & FAULT_FLAG_WRITE) flush_tlb_fix_spurious_fault(vma, address); } @@ -95228,7 +95641,7 @@ index 492e36f..3771c0a 100644 unlock: pte_unmap_unlock(pte, ptl); return 0; -@@ -3706,9 +3915,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3706,9 +3917,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd; pte_t *pte; @@ -95270,7 +95683,7 @@ index 492e36f..3771c0a 100644 pgd = pgd_offset(mm, address); pud = pud_alloc(mm, pgd, address); if (!pud) -@@ -3836,6 +4077,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) +@@ -3836,6 +4079,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) spin_unlock(&mm->page_table_lock); return 0; } @@ -95294,7 +95707,7 @@ index 492e36f..3771c0a 100644 #endif /* __PAGETABLE_PUD_FOLDED */ #ifndef __PAGETABLE_PMD_FOLDED -@@ -3866,6 +4124,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) +@@ -3866,6 +4126,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) spin_unlock(&mm->page_table_lock); return 0; } @@ -95325,7 +95738,7 @@ index 492e36f..3771c0a 100644 #endif /* __PAGETABLE_PMD_FOLDED */ #if !defined(__HAVE_ARCH_GATE_AREA) -@@ -3879,7 +4161,7 @@ static int __init gate_vma_init(void) +@@ -3879,7 +4163,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; @@ -95334,7 +95747,7 @@ index 492e36f..3771c0a 100644 return 0; } -@@ -4013,8 +4295,8 @@ out: +@@ -4013,8 +4297,8 @@ out: return ret; } @@ -95345,7 +95758,7 @@ index 492e36f..3771c0a 100644 { resource_size_t phys_addr; unsigned long prot = 0; -@@ -4040,8 +4322,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys); +@@ -4040,8 +4324,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys); * Access another process' address space as given in mm. If non-NULL, use the * given task for page fault accounting. */ @@ -95356,7 +95769,7 @@ index 492e36f..3771c0a 100644 { struct vm_area_struct *vma; void *old_buf = buf; -@@ -4049,7 +4331,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, +@@ -4049,7 +4333,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, down_read(&mm->mmap_sem); /* ignore errors, just check how much was successfully transferred */ while (len) { @@ -95365,7 +95778,7 @@ index 492e36f..3771c0a 100644 void *maddr; struct page *page = NULL; -@@ -4108,8 +4390,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, +@@ -4108,8 +4392,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, * * The caller must hold a reference on @mm. 
*/ @@ -95376,7 +95789,7 @@ index 492e36f..3771c0a 100644 { return __access_remote_vm(NULL, mm, addr, buf, len, write); } -@@ -4119,11 +4401,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr, +@@ -4119,11 +4403,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr, * Source/target buffer must be kernel space, * Do not walk the page table directly, use get_user_pages */ @@ -97593,7 +98006,7 @@ index f0d698b..7037c25 100644 return -ENOMEM; diff --git a/mm/slab.c b/mm/slab.c -index 0b1c2a5..819c6bc 100644 +index 0b1c2a5..4deff8e 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -300,10 +300,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent) @@ -97646,29 +98059,28 @@ index 0b1c2a5..819c6bc 100644 slab_early_init = 0; -@@ -3477,6 +3481,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp, +@@ -3477,6 +3481,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp, struct array_cache *ac = cpu_cache_get(cachep); check_irq_off(); + +#ifdef CONFIG_PAX_MEMORY_SANITIZE -+ if (pax_sanitize_slab) { -+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) { -+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size); ++ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE)) ++ STATS_INC_NOT_SANITIZED(cachep); ++ else { ++ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size); + -+ if (cachep->ctor) -+ cachep->ctor(objp); ++ if (cachep->ctor) ++ cachep->ctor(objp); + -+ STATS_INC_SANITIZED(cachep); -+ } else -+ STATS_INC_NOT_SANITIZED(cachep); ++ STATS_INC_SANITIZED(cachep); + } +#endif + kmemleak_free_recursive(objp, cachep->flags); objp = cache_free_debugcheck(cachep, objp, caller); -@@ -3705,6 +3724,7 @@ void kfree(const void *objp) +@@ -3705,6 +3723,7 @@ void kfree(const void *objp) if (unlikely(ZERO_OR_NULL_PTR(objp))) return; @@ -97676,7 +98088,7 @@ index 0b1c2a5..819c6bc 100644 local_irq_save(flags); kfree_debugcheck(objp); c = virt_to_cache(objp); -@@ -4146,14 +4166,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) +@@ -4146,14 +4165,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) } /* cpu stats */ { @@ -97703,7 +98115,7 @@ index 0b1c2a5..819c6bc 100644 #endif } -@@ -4374,13 +4402,69 @@ static const struct file_operations proc_slabstats_operations = { +@@ -4374,13 +4401,69 @@ static const struct file_operations proc_slabstats_operations = { static int __init slab_proc_init(void) { #ifdef CONFIG_DEBUG_SLAB_LEAK @@ -97775,10 +98187,10 @@ index 0b1c2a5..819c6bc 100644 * ksize - get the actual amount of memory allocated for a given object * @objp: Pointer to the object diff --git a/mm/slab.h b/mm/slab.h -index 8184a7c..ab27737 100644 +index 8184a7c..81ed62c 100644 --- a/mm/slab.h +++ b/mm/slab.h -@@ -32,6 +32,15 @@ extern struct list_head slab_caches; +@@ -32,6 +32,20 @@ extern struct list_head slab_caches; /* The slab cache that manages slab cache information */ extern struct kmem_cache *kmem_cache; @@ -97788,13 +98200,18 @@ index 8184a7c..ab27737 100644 +#else +#define PAX_MEMORY_SANITIZE_VALUE '\xff' +#endif -+extern bool pax_sanitize_slab; ++enum pax_sanitize_mode { ++ PAX_SANITIZE_SLAB_OFF = 0, ++ PAX_SANITIZE_SLAB_FAST, ++ PAX_SANITIZE_SLAB_FULL, ++}; ++extern enum pax_sanitize_mode pax_sanitize_slab; +#endif + unsigned long calculate_alignment(unsigned long flags, unsigned long align, unsigned long size); -@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, +@@ -67,7 +81,8 @@ 
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, /* Legal flag mask for kmem_cache_create(), for various configurations */ #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \ @@ -97804,7 +98221,7 @@ index 8184a7c..ab27737 100644 #if defined(CONFIG_DEBUG_SLAB) #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) -@@ -257,6 +267,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) +@@ -257,6 +272,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) return s; page = virt_to_head_page(x); @@ -97815,10 +98232,10 @@ index 8184a7c..ab27737 100644 if (slab_equal_or_root(cachep, s)) return cachep; diff --git a/mm/slab_common.c b/mm/slab_common.c -index f149e67..b366f92 100644 +index f149e67..97a8b4b 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c -@@ -23,11 +23,22 @@ +@@ -23,11 +23,35 @@ #include "slab.h" @@ -97829,20 +98246,47 @@ index f149e67..b366f92 100644 struct kmem_cache *kmem_cache; +#ifdef CONFIG_PAX_MEMORY_SANITIZE -+bool pax_sanitize_slab __read_only = true; ++enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST; +static int __init pax_sanitize_slab_setup(char *str) +{ -+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0); -+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis"); -+ return 1; ++ if (!str) ++ return 0; ++ ++ if (!strcmp(str, "0") || !strcmp(str, "off")) { ++ pr_info("PaX slab sanitization: %s\n", "disabled"); ++ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF; ++ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) { ++ pr_info("PaX slab sanitization: %s\n", "fast"); ++ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST; ++ } else if (!strcmp(str, "full")) { ++ pr_info("PaX slab sanitization: %s\n", "full"); ++ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL; ++ } else ++ pr_err("PaX slab sanitization: unsupported option '%s'\n", str); ++ ++ return 0; +} -+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup); ++early_param("pax_sanitize_slab", pax_sanitize_slab_setup); +#endif + #ifdef CONFIG_DEBUG_VM static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name, size_t size) -@@ -225,7 +236,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, +@@ -200,6 +224,13 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, + */ + flags &= CACHE_CREATE_MASK; + ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU)) ++ flags |= SLAB_NO_SANITIZE; ++ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL) ++ flags &= ~SLAB_NO_SANITIZE; ++#endif ++ + s = __kmem_cache_alias(memcg, name, size, align, flags, ctor); + if (s) + goto out_unlock; +@@ -225,7 +256,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, if (err) goto out_free_cache; @@ -97851,7 +98295,7 @@ index f149e67..b366f92 100644 list_add(&s->list, &slab_caches); memcg_register_cache(s); -@@ -278,8 +289,7 @@ void kmem_cache_destroy(struct kmem_cache *s) +@@ -278,8 +309,7 @@ void kmem_cache_destroy(struct kmem_cache *s) get_online_cpus(); mutex_lock(&slab_mutex); @@ -97861,7 +98305,7 @@ index f149e67..b366f92 100644 list_del(&s->list); if (!__kmem_cache_shutdown(s)) { -@@ -326,7 +336,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz +@@ -326,7 +356,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz panic("Creation of 
kmalloc slab %s size=%zu failed. Reason %d\n", name, size, err); @@ -97870,7 +98314,7 @@ index f149e67..b366f92 100644 } struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size, -@@ -339,7 +349,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size, +@@ -339,7 +369,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size, create_boot_cache(s, name, size, flags); list_add(&s->list, &slab_caches); @@ -97879,7 +98323,7 @@ index f149e67..b366f92 100644 return s; } -@@ -351,6 +361,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; +@@ -351,6 +381,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; EXPORT_SYMBOL(kmalloc_dma_caches); #endif @@ -97891,7 +98335,7 @@ index f149e67..b366f92 100644 /* * Conversion table for small slabs sizes / 8 to the index in the * kmalloc array. This is necessary for slabs < 192 since we have non power -@@ -415,6 +430,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) +@@ -415,6 +450,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) return kmalloc_dma_caches[index]; #endif @@ -97905,7 +98349,7 @@ index f149e67..b366f92 100644 return kmalloc_caches[index]; } -@@ -471,7 +493,7 @@ void __init create_kmalloc_caches(unsigned long flags) +@@ -471,7 +513,7 @@ void __init create_kmalloc_caches(unsigned long flags) for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) { if (!kmalloc_caches[i]) { kmalloc_caches[i] = create_kmalloc_cache(NULL, @@ -97914,7 +98358,7 @@ index f149e67..b366f92 100644 } /* -@@ -480,10 +502,10 @@ void __init create_kmalloc_caches(unsigned long flags) +@@ -480,10 +522,10 @@ void __init create_kmalloc_caches(unsigned long flags) * earlier power of two caches */ if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6) @@ -97927,7 +98371,7 @@ index f149e67..b366f92 100644 } /* Kmalloc array is now usable */ -@@ -516,6 +538,23 @@ void __init create_kmalloc_caches(unsigned long flags) +@@ -516,6 +558,23 @@ void __init create_kmalloc_caches(unsigned long flags) } } #endif @@ -97951,7 +98395,7 @@ index f149e67..b366f92 100644 } #endif /* !CONFIG_SLOB */ -@@ -556,6 +595,9 @@ void print_slabinfo_header(struct seq_file *m) +@@ -556,6 +615,9 @@ void print_slabinfo_header(struct seq_file *m) seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> " "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>"); seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); @@ -97962,7 +98406,7 @@ index f149e67..b366f92 100644 seq_putc(m, '\n'); } diff --git a/mm/slob.c b/mm/slob.c -index 4bf8809..98a6914 100644 +index 4bf8809..a0a0b9f 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next) @@ -98043,6 +98487,15 @@ index 4bf8809..98a6914 100644 INIT_LIST_HEAD(&sp->list); set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)); set_slob_page_free(sp, slob_list); +@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) + /* + * slob_free: entry point into the slob allocator. 
+ */ +-static void slob_free(void *block, int size) ++static void slob_free(struct kmem_cache *c, void *block, int size) + { + struct page *sp; + slob_t *prev, *next, *b = (slob_t *)block; @@ -359,12 +363,15 @@ static void slob_free(void *block, int size) if (slob_page_free(sp)) clear_slob_page_free(sp); @@ -98055,7 +98508,7 @@ index 4bf8809..98a6914 100644 } +#ifdef CONFIG_PAX_MEMORY_SANITIZE -+ if (pax_sanitize_slab) ++ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE))) + memset(block, PAX_MEMORY_SANITIZE_VALUE, size); +#endif + @@ -98136,7 +98589,7 @@ index 4bf8809..98a6914 100644 - slob_free(m, *m + align); - } else + slob_t *m = (slob_t *)(block - align); -+ slob_free(m, m[0].units + align); ++ slob_free(NULL, m, m[0].units + align); + } else { + __ClearPageSlab(sp); + page_mapcount_reset(sp); @@ -98280,24 +98733,34 @@ index 4bf8809..98a6914 100644 if (b && c->ctor) c->ctor(b); -@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node); +@@ -582,12 +694,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node) + EXPORT_SYMBOL(kmem_cache_alloc_node); + #endif - static void __kmem_cache_free(void *b, int size) +-static void __kmem_cache_free(void *b, int size) ++static void __kmem_cache_free(struct kmem_cache *c, void *b, int size) { - if (size < PAGE_SIZE) +- slob_free(b, size); + struct page *sp; + + sp = virt_to_page(b); + BUG_ON(!PageSlab(sp)); + if (!sp->private) - slob_free(b, size); ++ slob_free(c, b, size); else - slob_free_pages(b, get_order(size)); + slob_free_pages(sp, get_order(size)); } static void kmem_rcu_free(struct rcu_head *head) -@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head) +@@ -595,22 +711,36 @@ static void kmem_rcu_free(struct rcu_head *head) + struct slob_rcu *slob_rcu = (struct slob_rcu *)head; + void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu)); + +- __kmem_cache_free(b, slob_rcu->size); ++ __kmem_cache_free(NULL, b, slob_rcu->size); + } void kmem_cache_free(struct kmem_cache *c, void *b) { @@ -98320,7 +98783,7 @@ index 4bf8809..98a6914 100644 call_rcu(&slob_rcu->head, kmem_rcu_free); } else { - __kmem_cache_free(b, c->size); -+ __kmem_cache_free(b, size); ++ __kmem_cache_free(c, b, size); } +#ifdef CONFIG_PAX_USERCOPY_SLABS @@ -98333,7 +98796,7 @@ index 4bf8809..98a6914 100644 EXPORT_SYMBOL(kmem_cache_free); diff --git a/mm/slub.c b/mm/slub.c -index 7611f14..dfe9298 100644 +index 7611f14..3d5e216 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -207,7 +207,7 @@ struct track { @@ -98359,7 +98822,7 @@ index 7611f14..dfe9298 100644 slab_free_hook(s, x); +#ifdef CONFIG_PAX_MEMORY_SANITIZE -+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) { ++ if (!(s->flags & SLAB_NO_SANITIZE)) { + memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size); + if (s->ctor) + s->ctor(x); @@ -98383,7 +98846,7 @@ index 7611f14..dfe9298 100644 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || +#ifdef CONFIG_PAX_MEMORY_SANITIZE -+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) || ++ (!(flags & SLAB_NO_SANITIZE)) || +#endif s->ctor)) { /* @@ -98538,7 +99001,7 @@ index 7611f14..dfe9298 100644 } SLAB_ATTR_RO(aliases); -@@ -4605,6 +4678,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) +@@ -4605,6 +4678,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) SLAB_ATTR_RO(cache_dma); #endif @@ -98550,20 +99013,31 @@ index 7611f14..dfe9298 100644 +SLAB_ATTR_RO(usercopy); +#endif + ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++static ssize_t sanitize_show(struct kmem_cache *s, char 
*buf) ++{ ++ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE)); ++} ++SLAB_ATTR_RO(sanitize); ++#endif ++ static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); -@@ -4939,6 +5020,9 @@ static struct attribute *slab_attrs[] = { +@@ -4939,6 +5028,12 @@ static struct attribute *slab_attrs[] = { #ifdef CONFIG_ZONE_DMA &cache_dma_attr.attr, #endif +#ifdef CONFIG_PAX_USERCOPY_SLABS + &usercopy_attr.attr, +#endif ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ &sanitize_attr.attr, ++#endif #ifdef CONFIG_NUMA &remote_node_defrag_ratio_attr.attr, #endif -@@ -5171,6 +5255,7 @@ static char *create_unique_id(struct kmem_cache *s) +@@ -5171,6 +5266,7 @@ static char *create_unique_id(struct kmem_cache *s) return name; } @@ -98571,7 +99045,7 @@ index 7611f14..dfe9298 100644 static int sysfs_slab_add(struct kmem_cache *s) { int err; -@@ -5228,6 +5313,7 @@ static void sysfs_slab_remove(struct kmem_cache *s) +@@ -5228,6 +5324,7 @@ static void sysfs_slab_remove(struct kmem_cache *s) kobject_del(&s->kobj); kobject_put(&s->kobj); } @@ -98579,7 +99053,7 @@ index 7611f14..dfe9298 100644 /* * Need to buffer aliases during bootup until sysfs becomes -@@ -5241,6 +5327,7 @@ struct saved_alias { +@@ -5241,6 +5338,7 @@ struct saved_alias { static struct saved_alias *alias_list; @@ -98587,7 +99061,7 @@ index 7611f14..dfe9298 100644 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) { struct saved_alias *al; -@@ -5263,6 +5350,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name) +@@ -5263,6 +5361,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name) alias_list = al; return 0; } @@ -99509,10 +99983,10 @@ index 7552f9e..074ce29 100644 err = -EFAULT; break; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c -index 6afa3b4..7a14180 100644 +index 0007c9e..f11541b 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c -@@ -3740,8 +3740,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, +@@ -3736,8 +3736,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, break; case L2CAP_CONF_RFC: @@ -100952,7 +101426,7 @@ index 0d1e2cb..4501a2c 100644 void inet_get_local_port_range(struct net *net, int *low, int *high) diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c -index 8b9cf27..0d8d592 100644 +index 8b9cf27..9c17cab 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -18,6 +18,7 @@ @@ -100967,7 +101441,7 @@ index 8b9cf27..0d8d592 100644 return inet_ehashfn(net, laddr, lport, faddr, fport); } -+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet); ++extern void gr_update_task_in_ip_table(const struct inet_sock *inet); + /* * Allocate and initialize a new local port bind bucket. 
@@ -100976,7 +101450,7 @@ index 8b9cf27..0d8d592 100644 twrefcnt += inet_twsk_bind_unhash(tw, hinfo); spin_unlock(&head->lock); -+ gr_update_task_in_ip_table(current, inet_sk(sk)); ++ gr_update_task_in_ip_table(inet_sk(sk)); + if (tw) { inet_twsk_deschedule(tw, death_row); @@ -102786,10 +103260,45 @@ index 20b63d2..31a777d 100644 kfree_skb(skb); diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c -index 5f8e128..865d38e 100644 +index 5f8e128..d32ac8c 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c -@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) +@@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) + case IPPROTO_DCCP: + if (!onlyproto && (nh + offset + 4 < skb->data || + pskb_may_pull(skb, nh + offset + 4 - skb->data))) { +- __be16 *ports = (__be16 *)exthdr; ++ __be16 *ports; + ++ nh = skb_network_header(skb); ++ ports = (__be16 *)(nh + offset); + fl6->fl6_sport = ports[!!reverse]; + fl6->fl6_dport = ports[!reverse]; + } +@@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) + + case IPPROTO_ICMPV6: + if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) { +- u8 *icmp = (u8 *)exthdr; ++ u8 *icmp; + ++ nh = skb_network_header(skb); ++ icmp = (u8 *)(nh + offset); + fl6->fl6_icmp_type = icmp[0]; + fl6->fl6_icmp_code = icmp[1]; + } +@@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) + case IPPROTO_MH: + if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { + struct ip6_mh *mh; +- mh = (struct ip6_mh *)exthdr; + ++ nh = skb_network_header(skb); ++ mh = (struct ip6_mh *)(nh + offset); + fl6->fl6_mh_type = mh->ip6mh_type; + } + fl6->flowi6_proto = nexthdr; +@@ -212,11 +217,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) } } @@ -102803,7 +103312,7 @@ index 5f8e128..865d38e 100644 return dst_entries_get_fast(ops) > ops->gc_thresh * 2; } -@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = { +@@ -329,19 +334,19 @@ static struct ctl_table xfrm6_policy_table[] = { static int __net_init xfrm6_net_init(struct net *net) { @@ -102828,7 +103337,7 @@ index 5f8e128..865d38e 100644 if (!hdr) goto err_reg; -@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net) +@@ -349,8 +354,7 @@ static int __net_init xfrm6_net_init(struct net *net) return 0; err_reg: @@ -106237,10 +106746,10 @@ index 8fac3fd..32ff38d 100644 unsigned int secindex_strings; diff --git a/security/Kconfig b/security/Kconfig -index beb86b5..9becb4a 100644 +index beb86b5..e66c504 100644 --- a/security/Kconfig +++ b/security/Kconfig -@@ -4,6 +4,965 @@ +@@ -4,6 +4,969 @@ menu "Security options" @@ -107003,10 +107512,14 @@ index beb86b5..9becb4a 100644 + and you are advised to test this feature on your expected workload + before deploying it. + ++ The slab sanitization feature excludes a few slab caches per default ++ for performance reasons. To extend the feature to cover those as ++ well, pass "pax_sanitize_slab=full" as kernel command line parameter. ++ + To reduce the performance penalty by sanitizing pages only, albeit + limiting the effectiveness of this feature at the same time, slab -+ sanitization can be disabled with the kernel commandline parameter -+ "pax_sanitize_slab=0". ++ sanitization can be disabled with the kernel command line parameter ++ "pax_sanitize_slab=off". 
+ + Note that this feature does not protect data stored in live pages, + e.g., process memory swapped to disk may stay there for a long time. @@ -107206,7 +107719,7 @@ index beb86b5..9becb4a 100644 source security/keys/Kconfig config SECURITY_DMESG_RESTRICT -@@ -103,7 +1062,7 @@ config INTEL_TXT +@@ -103,7 +1066,7 @@ config INTEL_TXT config LSM_MMAP_MIN_ADDR int "Low address space for LSM to protect from user allocation" depends on SECURITY && SECURITY_SELINUX @@ -108024,7 +108537,7 @@ index af49721..e85058e 100644 if (err < 0) return err; diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c -index 01a5e05..c6bb425 100644 +index 566b0f6..636730b 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -2811,11 +2811,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, @@ -116371,10 +116884,10 @@ index 0000000..4378111 +} diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data new file mode 100644 -index 0000000..d832fcc +index 0000000..2f37382 --- /dev/null +++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data -@@ -0,0 +1,5991 @@ +@@ -0,0 +1,5996 @@ +intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL +ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL +storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL @@ -117364,6 +117877,7 @@ index 0000000..d832fcc +page_offset_11120 page_offset 0 11120 NULL +cea_db_payload_len_11124 cea_db_payload_len 0 11124 NULL nohasharray +tracing_buffers_read_11124 tracing_buffers_read 3 11124 &cea_db_payload_len_11124 ++alloc_alien_cache_11127 alloc_alien_cache 2 11127 NULL +snd_gf1_pcm_playback_silence_11172 snd_gf1_pcm_playback_silence 4-3 11172 NULL +il_dbgfs_rx_queue_read_11221 il_dbgfs_rx_queue_read 3 11221 NULL +comedi_alloc_spriv_11234 comedi_alloc_spriv 2 11234 NULL @@ -117725,6 +118239,7 @@ index 0000000..d832fcc +snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 &hpet_readl_14801 +security_inode_rename_14805 security_inode_rename 0 14805 NULL +xfs_btree_kill_iroot_14824 xfs_btree_kill_iroot 0 14824 NULL ++do_tune_cpucache_14828 do_tune_cpucache 2 14828 NULL +mrp_attr_create_14853 mrp_attr_create 3 14853 NULL +lcd_write_14857 lcd_write 3 14857 NULL +get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL @@ -118062,7 +118577,8 @@ index 0000000..d832fcc +smk_write_rules_list_18565 smk_write_rules_list 3 18565 NULL +debug_output_18575 debug_output 3 18575 NULL +xfs_btree_read_bufl_18597 xfs_btree_read_bufl 0 18597 NULL -+filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL ++filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL nohasharray ++slabinfo_write_18600 slabinfo_write 3 18600 &filemap_fdatawait_range_18600 +iowarrior_write_18604 iowarrior_write 3 18604 NULL +nvc0_ram_create__18624 nvc0_ram_create_ 4 18624 NULL +from_buffer_18625 from_buffer 3 18625 NULL @@ -120694,6 +121210,7 @@ index 0000000..d832fcc +nvme_trans_send_fw_cmd_47479 nvme_trans_send_fw_cmd 4 47479 NULL +newpart_47485 newpart 6-4 47485 NULL +core_sys_select_47494 core_sys_select 1 47494 NULL ++alloc_arraycache_47505 alloc_arraycache 2 47505 NULL +unlink_simple_47506 unlink_simple 3 47506 NULL +pstore_decompress_47510 pstore_decompress 0 47510 NULL +__proc_lnet_portal_rotor_47529 __proc_lnet_portal_rotor 5 47529 NULL @@ -122031,6 +122548,7 @@ index 0000000..d832fcc +insert_one_name_61668 insert_one_name 7 61668 NULL +qib_format_hwmsg_61679 qib_format_hwmsg 2 61679 NULL +lock_loop_61681 lock_loop 1 61681 NULL 
++__do_tune_cpucache_61684 __do_tune_cpucache 2 61684 NULL +filter_read_61692 filter_read 3 61692 NULL +iov_length_61716 iov_length 0 61716 NULL +fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL @@ -123835,11 +124353,49 @@ index 0a578fe..b81f62d 100644 0; \ }) +diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c +index 714b949..1f0dc1e 100644 +--- a/virt/kvm/iommu.c ++++ b/virt/kvm/iommu.c +@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm, + gfn_t base_gfn, unsigned long npages); + + static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, +- unsigned long size) ++ unsigned long npages) + { + gfn_t end_gfn; + pfn_t pfn; + + pfn = gfn_to_pfn_memslot(slot, gfn); +- end_gfn = gfn + (size >> PAGE_SHIFT); ++ end_gfn = gfn + npages; + gfn += 1; + + if (is_error_noslot_pfn(pfn)) +@@ -119,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) + * Pin all pages we are about to map in memory. This is + * important because we unmap and unpin in 4kb steps later. + */ +- pfn = kvm_pin_pages(slot, gfn, page_size); ++ pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT); + if (is_error_noslot_pfn(pfn)) { + gfn += 1; + continue; +@@ -131,7 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) + if (r) { + printk(KERN_ERR "kvm_iommu_map_address:" + "iommu failed to map pfn=%llx\n", pfn); +- kvm_unpin_pages(kvm, pfn, page_size); ++ kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT); + goto unmap_pages; + } + diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c -index 03a0381..8b31923 100644 +index 6611253..eb4bc0f 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c -@@ -76,12 +76,17 @@ LIST_HEAD(vm_list); +@@ -77,12 +77,17 @@ LIST_HEAD(vm_list); static cpumask_var_t cpus_hardware_enabled; static int kvm_usage_count = 0; @@ -123859,7 +124415,7 @@ index 03a0381..8b31923 100644 struct dentry *kvm_debugfs_dir; -@@ -758,7 +763,7 @@ int __kvm_set_memory_region(struct kvm *kvm, +@@ -768,7 +773,7 @@ int __kvm_set_memory_region(struct kvm *kvm, /* We can read the guest memory with __xxx_user() later on. 
*/ if ((mem->slot < KVM_USER_MEM_SLOTS) && ((mem->userspace_addr & (PAGE_SIZE - 1)) || @@ -123868,7 +124424,7 @@ index 03a0381..8b31923 100644 (void __user *)(unsigned long)mem->userspace_addr, mem->memory_size))) goto out; -@@ -1615,9 +1620,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached); +@@ -1625,9 +1630,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) { @@ -123888,7 +124444,7 @@ index 03a0381..8b31923 100644 } EXPORT_SYMBOL_GPL(kvm_clear_guest_page); -@@ -1867,7 +1880,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp) +@@ -1877,7 +1890,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp) return 0; } @@ -123897,7 +124453,7 @@ index 03a0381..8b31923 100644 .release = kvm_vcpu_release, .unlocked_ioctl = kvm_vcpu_ioctl, #ifdef CONFIG_COMPAT -@@ -2532,7 +2545,7 @@ out: +@@ -2545,7 +2558,7 @@ out: } #endif @@ -123906,7 +124462,7 @@ index 03a0381..8b31923 100644 .release = kvm_vm_release, .unlocked_ioctl = kvm_vm_ioctl, #ifdef CONFIG_COMPAT -@@ -2632,7 +2645,7 @@ out: +@@ -2645,7 +2658,7 @@ out: return r; } @@ -123915,7 +124471,7 @@ index 03a0381..8b31923 100644 .unlocked_ioctl = kvm_dev_ioctl, .compat_ioctl = kvm_dev_ioctl, .llseek = noop_llseek, -@@ -2658,7 +2671,7 @@ static void hardware_enable_nolock(void *junk) +@@ -2671,7 +2684,7 @@ static void hardware_enable_nolock(void *junk) if (r) { cpumask_clear_cpu(cpu, cpus_hardware_enabled); @@ -123924,7 +124480,7 @@ index 03a0381..8b31923 100644 printk(KERN_INFO "kvm: enabling virtualization on " "CPU%d failed\n", cpu); } -@@ -2714,10 +2727,10 @@ static int hardware_enable_all(void) +@@ -2727,10 +2740,10 @@ static int hardware_enable_all(void) kvm_usage_count++; if (kvm_usage_count == 1) { @@ -123937,7 +124493,7 @@ index 03a0381..8b31923 100644 hardware_disable_all_nolock(); r = -EBUSY; } -@@ -3121,7 +3134,7 @@ static void kvm_sched_out(struct preempt_notifier *pn, +@@ -3134,7 +3147,7 @@ static void kvm_sched_out(struct preempt_notifier *pn, kvm_arch_vcpu_put(vcpu); } @@ -123946,7 +124502,7 @@ index 03a0381..8b31923 100644 struct module *module) { int r; -@@ -3168,7 +3181,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, +@@ -3181,7 +3194,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, if (!vcpu_align) vcpu_align = __alignof__(struct kvm_vcpu); kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, @@ -123955,7 +124511,7 @@ index 03a0381..8b31923 100644 if (!kvm_vcpu_cache) { r = -ENOMEM; goto out_free_3; -@@ -3178,9 +3191,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, +@@ -3191,9 +3204,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, if (r) goto out_free; @@ -123967,7 +124523,7 @@ index 03a0381..8b31923 100644 r = misc_register(&kvm_dev); if (r) { -@@ -3190,9 +3205,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, +@@ -3203,9 +3218,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, register_syscore_ops(&kvm_syscore_ops); |