path: root/main/linux-virt-grsec
author    Leonardo Arena <rnalrd@alpinelinux.org>    2014-07-09 06:17:57 +0000
committer Leonardo Arena <rnalrd@alpinelinux.org>    2014-07-09 06:17:57 +0000
commit    9b0cdbdd937a3a8afc21691baa9290836ff545f6 (patch)
tree      6d8e575bf2d90b95791dd65509bdb194fee3646f /main/linux-virt-grsec
parent    a787ba293c40e01f07dc98369a2d05007075353f (diff)
download  aports-9b0cdbdd937a3a8afc21691baa9290836ff545f6.tar.bz2
          aports-9b0cdbdd937a3a8afc21691baa9290836ff545f6.tar.xz
main/linux-virt-grsec: upgrade to 3.14.11
Diffstat (limited to 'main/linux-virt-grsec')
-rw-r--r--  main/linux-virt-grsec/APKBUILD                                                                                 |   16
-rw-r--r--  main/linux-virt-grsec/grsecurity-3.0-3.14.11-201407072045.patch (renamed from main/linux-virt-grsec/grsecurity-3.0-3.14.8-201406191347.patch) | 1469
2 files changed, 799 insertions, 686 deletions
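
The APKBUILD diff below bumps pkgver to 3.14.11, points source= at the 201407072045 grsecurity patch, and refreshes the md5/sha256/sha512 checksums for the new files. As a minimal sketch (assuming an aports checkout with Alpine's abuild tool installed; not part of this commit), checksum variables like these are normally regenerated rather than edited by hand:

    # from the package directory; fetches the sources and rewrites the
    # md5sums/sha256sums/sha512sums variables in APKBUILD
    cd main/linux-virt-grsec
    abuild checksum
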
diff --git a/main/linux-virt-grsec/APKBUILD b/main/linux-virt-grsec/APKBUILD
index 4bd481251a..b3d4219879 100644
--- a/main/linux-virt-grsec/APKBUILD
+++ b/main/linux-virt-grsec/APKBUILD
@@ -3,7 +3,7 @@
_flavor=virt-grsec
pkgname=linux-${_flavor}
-pkgver=3.14.8
+pkgver=3.14.11
case $pkgver in
*.*.*) _kernver=${pkgver%.*};;
*.*) _kernver=${pkgver};;
@@ -18,7 +18,7 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
- grsecurity-3.0-$pkgver-201406191347.patch
+ grsecurity-3.0-$pkgver-201407072045.patch
fix-memory-map-for-PIE-applications.patch
imx6q-no-unclocked-sleep.patch
@@ -146,22 +146,22 @@ dev() {
}
md5sums="b621207b3f6ecbb67db18b13258f8ea8 linux-3.14.tar.xz
-f612535d2c5d954b5e885757c387ae54 patch-3.14.8.xz
-f97092de8a6fa522ab9c8c510299274a grsecurity-3.0-3.14.8-201406191347.patch
+5cf3d2cb0f552c2c6faf829b6630e84f patch-3.14.11.xz
+53571da447f6543f8741e8c998a01e4f grsecurity-3.0-3.14.11-201407072045.patch
c6a4ae7e8ca6159e1631545515805216 fix-memory-map-for-PIE-applications.patch
1a307fc1d63231bf01d22493a4f14378 imx6q-no-unclocked-sleep.patch
74884a16fa9c58e0cabfaf57c8b64678 kernelconfig.x86
ef60383e07d9e7df6c474a03f3f56782 kernelconfig.x86_64"
sha256sums="61558aa490855f42b6340d1a1596be47454909629327c49a5e4e10268065dffa linux-3.14.tar.xz
-0edab0f772836162e5e57ef294d83e88153c15a12f394914c6a25b49e408e8f1 patch-3.14.8.xz
-aa9d8c11e1c1790e71088ae0a6494b8e44b0734f8e5d4fdeb9b98e0c77427406 grsecurity-3.0-3.14.8-201406191347.patch
+3f290fb547cb4afe23bf520c8c863b6d1e090814f4a6fa0080ed51b4afd9a409 patch-3.14.11.xz
+b9f3eee998c12873b3b4263522c4faaf1c3a1536b513d553377d4b4dc07b9bb5 grsecurity-3.0-3.14.11-201407072045.patch
500f3577310be52e87b9fecdc2e9c4ca43210fd97d69089f9005d484563f74c7 fix-memory-map-for-PIE-applications.patch
21179fbb22a5b74af0a609350ae1a170e232908572b201d02e791d2ce0a685d3 imx6q-no-unclocked-sleep.patch
0afbfb07b8c1eaf741593da97ad15ab34196afe541a82efc66cb8648c36c6c68 kernelconfig.x86
92aa8a3f494732762deec3adfe34b0578bf86310c45eafb678c3c518e6ef578f kernelconfig.x86_64"
sha512sums="5730d83a7a81134c1e77c0bf89e42dee4f8251ad56c1ac2be20c59e26fdfaa7bea55f277e7af156b637f22e1584914a46089af85039177cb43485089c74ac26e linux-3.14.tar.xz
-a71fdb5391d664ecccef6602df638588e6202992415a788ad85fab9878ec6b76034c37de824069cfc6d6d502a1fab0eba98c69170f410d28951335e19d94db72 patch-3.14.8.xz
-4e9c3e5e6be903cf523160d161633fbe0cbc17d0cb4c67284a5c8c82ca999c46b84bcade802425e25c572256fc016169679507f1ca0cd801c2197cc7b6caa2e6 grsecurity-3.0-3.14.8-201406191347.patch
+fb4dca2cf832b04896f4c052ea84eab501c459bf27030b81a88b288d09d320b86254b7e995ae1931c6083ae4c88f62e4ba1976ce2254d88645f9e95a253d19e4 patch-3.14.11.xz
+c02ef0f5df3231c3cdb9ebe4aae360ec950a2f6cb6ef11eccaf9736abe71c90cf4a163324ff515aaa1279a57ab70481cb9323dc5896563c716a5fd8461306632 grsecurity-3.0-3.14.11-201407072045.patch
4665c56ae1bbac311f9205d64918e84ee8b01d47d6e2396ff6b8adfb10aada7f7254531ce62e31edbb65c2a54a830f09ad05d314dfcd75d6272f4068945ad7c7 fix-memory-map-for-PIE-applications.patch
87d1ad59732f265a5b0db54490dc1762c14ea4b868e7eb1aedc3ce57b48046de7bbc08cf5cfcf6f1380fa84063b0edb16ba3d5e3c5670be9bbb229275c88b221 imx6q-no-unclocked-sleep.patch
324513d75def9fb78ccc5f446e1fae28e7069e94c1ebac406776750cd05f1bf6f0f8a9216543ee6bf82a68d9834e2a1404093d92cc2acd2cb28e3f9a478ad0c6 kernelconfig.x86
diff --git a/main/linux-virt-grsec/grsecurity-3.0-3.14.8-201406191347.patch b/main/linux-virt-grsec/grsecurity-3.0-3.14.11-201407072045.patch
index cf0e6f3646..a883f759f7 100644
--- a/main/linux-virt-grsec/grsecurity-3.0-3.14.8-201406191347.patch
+++ b/main/linux-virt-grsec/grsecurity-3.0-3.14.11-201407072045.patch
@@ -287,7 +287,7 @@ index 7116fda..d8ed6e8 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index ef1d59b..7030652 100644
+index f1bbec5..d78810b 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -2956,11 +2956,23 @@ index 4693188..4596c5e 100644
static int (*invoke_psci_fn)(u32, u32, u32, u32);
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
-index 0dd3b79..e018f64 100644
+index 0dd3b79..b67388e 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
-@@ -929,10 +929,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
- return current_thread_info()->syscall;
+@@ -908,7 +908,7 @@ enum ptrace_syscall_dir {
+ PTRACE_SYSCALL_EXIT,
+ };
+
+-static int tracehook_report_syscall(struct pt_regs *regs,
++static void tracehook_report_syscall(struct pt_regs *regs,
+ enum ptrace_syscall_dir dir)
+ {
+ unsigned long ip;
+@@ -926,19 +926,29 @@ static int tracehook_report_syscall(struct pt_regs *regs,
+ current_thread_info()->syscall = -1;
+
+ regs->ARM_ip = ip;
+- return current_thread_info()->syscall;
}
+#ifdef CONFIG_GRKERNSEC_SETXID
@@ -2979,6 +2991,15 @@ index 0dd3b79..e018f64 100644
/* Do the secure computing check first; failures should be fast. */
if (secure_computing(scno) == -1)
return -1;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+- scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
++ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
++
++ scno = current_thread_info()->syscall;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, scno);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1e8b030..37c3022 100644
--- a/arch/arm/kernel/setup.c
@@ -5437,10 +5458,10 @@ index 7225dad..2a7c8256 100644
/*
* If for any reason at all we couldn't handle the fault, make
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
-index 68232db..6ca80af 100644
+index 76069c1..c2aa816 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
-@@ -154,6 +154,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
unsigned long pgoff, unsigned long flags)
{
struct vm_unmapped_area_info info;
@@ -5448,7 +5469,7 @@ index 68232db..6ca80af 100644
if (len > RGN_MAP_LIMIT)
return -ENOMEM;
-@@ -177,6 +178,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
info.align_offset = 0;
@@ -5551,10 +5572,10 @@ index 2d6f0de..de5f5ac 100644
#define smp_load_acquire(p) \
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
-index 0424315..defcca9 100644
+index 3c52fa6..11b2ad8 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
-@@ -205,6 +205,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
+@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & HUGEPT_MASK;
info.align_offset = 0;
@@ -6739,18 +6760,18 @@ index 25da651..ae2a259 100644
#endif /* __ASM_SMTC_PROC_H */
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
-index e80ae50..4404147 100644
+index e80ae50..b93dd2e 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
-@@ -116,6 +116,8 @@ static inline struct thread_info *current_thread_info(void)
- #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
- #define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */
- #define TIF_32BIT_FPREGS 27 /* 32-bit floating point registers */
+@@ -105,6 +105,8 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_SECCOMP 4 /* secure computing */
+ #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
+ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
+/* li takes a 32bit immediate */
-+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
- #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
-
- #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
++#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
+ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
+ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+ #define TIF_NOHZ 19 /* in adaptive nohz mode */
@@ -134,14 +136,15 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
#define _TIF_32BIT_FPREGS (1<<TIF_32BIT_FPREGS)
@@ -12213,7 +12234,7 @@ index c4d3da6..1aed043 100644
if (write && !pmd_write(pmd))
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
-index 9bd9ce8..dc84852 100644
+index d329537..2c3746a 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
@@ -12485,10 +12506,10 @@ index b6cde32..c0cb736 100644
else
copy_from_user_overflow();
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
-index 0cb3bba..7338b2d 100644
+index e514899..f8743c4 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
-@@ -212,6 +212,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
@@ -12496,7 +12517,7 @@ index 0cb3bba..7338b2d 100644
return vm_unmapped_area(&info);
}
-@@ -229,6 +230,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
info.high_limit = current->mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
@@ -12622,7 +12643,7 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 0af5250..59f9597 100644
+index 1981dd9..8f3ff4d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -126,7 +126,7 @@ config X86
@@ -16115,7 +16136,7 @@ index 69bbb48..32517fe 100644
#define smp_load_acquire(p) \
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
-index 9fc1af7..fc71228 100644
+index 9fc1af7..776d75a 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -49,7 +49,7 @@
@@ -16195,7 +16216,7 @@ index 9fc1af7..fc71228 100644
*/
#ifdef CONFIG_X86_64
-static __always_inline int fls64(__u64 x)
-+static __always_inline long fls64(__u64 x)
++static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
{
int bitpos = -1;
/*
@@ -17682,7 +17703,7 @@ index 86f9301..b365cda 100644
void unregister_nmi_handler(unsigned int, const char *);
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
-index 775873d..de5f0304 100644
+index 775873d..04cd306 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
@@ -17693,6 +17714,29 @@ index 775873d..de5f0304 100644
#define __boot_va(x) __va(x)
#define __boot_pa(x) __pa(x)
+@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
+ * virt_to_page(kaddr) returns a valid pointer if and only if
+ * virt_addr_valid(kaddr) returns true.
+ */
+-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+ #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+ extern bool __virt_addr_valid(unsigned long kaddr);
+ #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
+
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++#define virt_to_page(kaddr) \
++ ({ \
++ const void *__kaddr = (const void *)(kaddr); \
++ BUG_ON(!virt_addr_valid(__kaddr)); \
++ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
++ })
++#else
++#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
++#endif
++
+ #endif /* __ASSEMBLY__ */
+
+ #include <asm-generic/memory_model.h>
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 0f1ddee..e2fc3d1 100644
--- a/arch/x86/include/asm/page_64.h
@@ -18690,7 +18734,7 @@ index fdedd38..95c02c2 100644
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
-index 14fd6fd..6740420 100644
+index 6205f0c..b31a4a4 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -84,28 +84,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
@@ -22445,7 +22489,7 @@ index 01d1c18..8073693 100644
#include <asm/processor.h>
#include <asm/fcntl.h>
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
-index a2a4f46..6cab058 100644
+index 6491353..a918952 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -177,13 +177,153 @@
@@ -22722,7 +22766,7 @@ index a2a4f46..6cab058 100644
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
jnz sysenter_audit
sysenter_do_call:
-@@ -441,12 +613,24 @@ sysenter_do_call:
+@@ -442,12 +614,24 @@ sysenter_after_call:
testl $_TIF_ALLWORK_MASK, %ecx
jne sysexit_audit
sysenter_exit:
@@ -22747,7 +22791,7 @@ index a2a4f46..6cab058 100644
PTGS_TO_GS
ENABLE_INTERRUPTS_SYSEXIT
-@@ -463,6 +647,9 @@ sysenter_audit:
+@@ -464,6 +648,9 @@ sysenter_audit:
movl %eax,%edx /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
call __audit_syscall_entry
@@ -22757,7 +22801,7 @@ index a2a4f46..6cab058 100644
pushl_cfi %ebx
movl PT_EAX(%esp),%eax /* reload syscall number */
jmp sysenter_do_call
-@@ -488,10 +675,16 @@ sysexit_audit:
+@@ -489,10 +676,16 @@ sysexit_audit:
CFI_ENDPROC
.pushsection .fixup,"ax"
@@ -22776,7 +22820,7 @@ index a2a4f46..6cab058 100644
PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)
-@@ -506,6 +699,11 @@ ENTRY(system_call)
+@@ -507,6 +700,11 @@ ENTRY(system_call)
pushl_cfi %eax # save orig_eax
SAVE_ALL
GET_THREAD_INFO(%ebp)
@@ -22788,7 +22832,7 @@ index a2a4f46..6cab058 100644
# system call tracing in operation / emulation
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
jnz syscall_trace_entry
-@@ -524,6 +722,15 @@ syscall_exit:
+@@ -525,6 +723,15 @@ syscall_exit:
testl $_TIF_ALLWORK_MASK, %ecx # current->work
jne syscall_exit_work
@@ -22804,7 +22848,7 @@ index a2a4f46..6cab058 100644
restore_all:
TRACE_IRQS_IRET
restore_all_notrace:
-@@ -580,14 +787,34 @@ ldt_ss:
+@@ -576,14 +783,34 @@ ldt_ss:
* compensating for the offset by changing to the ESPFIX segment with
* a base address that matches for the difference.
*/
@@ -22842,7 +22886,7 @@ index a2a4f46..6cab058 100644
pushl_cfi $__ESPFIX_SS
pushl_cfi %eax /* new kernel esp */
/* Disable interrupts, but do not irqtrace this section: we
-@@ -616,20 +843,18 @@ work_resched:
+@@ -612,20 +839,18 @@ work_resched:
movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing?
@@ -22865,7 +22909,7 @@ index a2a4f46..6cab058 100644
#endif
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
-@@ -650,7 +875,7 @@ work_notifysig_v86:
+@@ -646,7 +871,7 @@ work_notifysig_v86:
movl %eax, %esp
jmp 1b
#endif
@@ -22874,7 +22918,7 @@ index a2a4f46..6cab058 100644
# perform syscall exit tracing
ALIGN
-@@ -658,11 +883,14 @@ syscall_trace_entry:
+@@ -654,11 +879,14 @@ syscall_trace_entry:
movl $-ENOSYS,PT_EAX(%esp)
movl %esp, %eax
call syscall_trace_enter
@@ -22890,7 +22934,7 @@ index a2a4f46..6cab058 100644
# perform syscall exit tracing
ALIGN
-@@ -675,21 +903,25 @@ syscall_exit_work:
+@@ -671,26 +899,30 @@ syscall_exit_work:
movl %esp, %eax
call syscall_trace_leave
jmp resume_userspace
@@ -22913,13 +22957,19 @@ index a2a4f46..6cab058 100644
syscall_badsys:
movl $-ENOSYS,PT_EAX(%esp)
- jmp resume_userspace
+ jmp syscall_exit
-END(syscall_badsys)
+ENDPROC(syscall_badsys)
+
+ sysenter_badsys:
+ movl $-ENOSYS,PT_EAX(%esp)
+ jmp sysenter_after_call
+-END(syscall_badsys)
++ENDPROC(sysenter_badsys)
CFI_ENDPROC
/*
* End of kprobes section
-@@ -705,8 +937,15 @@ END(syscall_badsys)
+@@ -706,8 +938,15 @@ END(syscall_badsys)
* normal stack and adjusts ESP with the matching offset.
*/
/* fixup the stack */
@@ -22937,7 +22987,7 @@ index a2a4f46..6cab058 100644
shl $16, %eax
addl %esp, %eax /* the adjusted stack pointer */
pushl_cfi $__KERNEL_DS
-@@ -759,7 +998,7 @@ vector=vector+1
+@@ -760,7 +999,7 @@ vector=vector+1
.endr
2: jmp common_interrupt
.endr
@@ -22946,7 +22996,7 @@ index a2a4f46..6cab058 100644
.previous
END(interrupt)
-@@ -820,7 +1059,7 @@ ENTRY(coprocessor_error)
+@@ -821,7 +1060,7 @@ ENTRY(coprocessor_error)
pushl_cfi $do_coprocessor_error
jmp error_code
CFI_ENDPROC
@@ -22955,7 +23005,7 @@ index a2a4f46..6cab058 100644
ENTRY(simd_coprocessor_error)
RING0_INT_FRAME
-@@ -833,7 +1072,7 @@ ENTRY(simd_coprocessor_error)
+@@ -834,7 +1073,7 @@ ENTRY(simd_coprocessor_error)
.section .altinstructions,"a"
altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
.previous
@@ -22964,7 +23014,7 @@ index a2a4f46..6cab058 100644
663: pushl $do_simd_coprocessor_error
664:
.previous
-@@ -842,7 +1081,7 @@ ENTRY(simd_coprocessor_error)
+@@ -843,7 +1082,7 @@ ENTRY(simd_coprocessor_error)
#endif
jmp error_code
CFI_ENDPROC
@@ -22973,7 +23023,7 @@ index a2a4f46..6cab058 100644
ENTRY(device_not_available)
RING0_INT_FRAME
-@@ -851,18 +1090,18 @@ ENTRY(device_not_available)
+@@ -852,18 +1091,18 @@ ENTRY(device_not_available)
pushl_cfi $do_device_not_available
jmp error_code
CFI_ENDPROC
@@ -22995,7 +23045,7 @@ index a2a4f46..6cab058 100644
#endif
ENTRY(overflow)
-@@ -872,7 +1111,7 @@ ENTRY(overflow)
+@@ -873,7 +1112,7 @@ ENTRY(overflow)
pushl_cfi $do_overflow
jmp error_code
CFI_ENDPROC
@@ -23004,7 +23054,7 @@ index a2a4f46..6cab058 100644
ENTRY(bounds)
RING0_INT_FRAME
-@@ -881,7 +1120,7 @@ ENTRY(bounds)
+@@ -882,7 +1121,7 @@ ENTRY(bounds)
pushl_cfi $do_bounds
jmp error_code
CFI_ENDPROC
@@ -23013,7 +23063,7 @@ index a2a4f46..6cab058 100644
ENTRY(invalid_op)
RING0_INT_FRAME
-@@ -890,7 +1129,7 @@ ENTRY(invalid_op)
+@@ -891,7 +1130,7 @@ ENTRY(invalid_op)
pushl_cfi $do_invalid_op
jmp error_code
CFI_ENDPROC
@@ -23022,7 +23072,7 @@ index a2a4f46..6cab058 100644
ENTRY(coprocessor_segment_overrun)
RING0_INT_FRAME
-@@ -899,7 +1138,7 @@ ENTRY(coprocessor_segment_overrun)
+@@ -900,7 +1139,7 @@ ENTRY(coprocessor_segment_overrun)
pushl_cfi $do_coprocessor_segment_overrun
jmp error_code
CFI_ENDPROC
@@ -23031,7 +23081,7 @@ index a2a4f46..6cab058 100644
ENTRY(invalid_TSS)
RING0_EC_FRAME
-@@ -907,7 +1146,7 @@ ENTRY(invalid_TSS)
+@@ -908,7 +1147,7 @@ ENTRY(invalid_TSS)
pushl_cfi $do_invalid_TSS
jmp error_code
CFI_ENDPROC
@@ -23040,7 +23090,7 @@ index a2a4f46..6cab058 100644
ENTRY(segment_not_present)
RING0_EC_FRAME
-@@ -915,7 +1154,7 @@ ENTRY(segment_not_present)
+@@ -916,7 +1155,7 @@ ENTRY(segment_not_present)
pushl_cfi $do_segment_not_present
jmp error_code
CFI_ENDPROC
@@ -23049,7 +23099,7 @@ index a2a4f46..6cab058 100644
ENTRY(stack_segment)
RING0_EC_FRAME
-@@ -923,7 +1162,7 @@ ENTRY(stack_segment)
+@@ -924,7 +1163,7 @@ ENTRY(stack_segment)
pushl_cfi $do_stack_segment
jmp error_code
CFI_ENDPROC
@@ -23058,7 +23108,7 @@ index a2a4f46..6cab058 100644
ENTRY(alignment_check)
RING0_EC_FRAME
-@@ -931,7 +1170,7 @@ ENTRY(alignment_check)
+@@ -932,7 +1171,7 @@ ENTRY(alignment_check)
pushl_cfi $do_alignment_check
jmp error_code
CFI_ENDPROC
@@ -23067,7 +23117,7 @@ index a2a4f46..6cab058 100644
ENTRY(divide_error)
RING0_INT_FRAME
-@@ -940,7 +1179,7 @@ ENTRY(divide_error)
+@@ -941,7 +1180,7 @@ ENTRY(divide_error)
pushl_cfi $do_divide_error
jmp error_code
CFI_ENDPROC
@@ -23076,7 +23126,7 @@ index a2a4f46..6cab058 100644
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
-@@ -950,7 +1189,7 @@ ENTRY(machine_check)
+@@ -951,7 +1190,7 @@ ENTRY(machine_check)
pushl_cfi machine_check_vector
jmp error_code
CFI_ENDPROC
@@ -23085,7 +23135,7 @@ index a2a4f46..6cab058 100644
#endif
ENTRY(spurious_interrupt_bug)
-@@ -960,7 +1199,7 @@ ENTRY(spurious_interrupt_bug)
+@@ -961,7 +1200,7 @@ ENTRY(spurious_interrupt_bug)
pushl_cfi $do_spurious_interrupt_bug
jmp error_code
CFI_ENDPROC
@@ -23094,7 +23144,7 @@ index a2a4f46..6cab058 100644
/*
* End of kprobes section
*/
-@@ -1070,7 +1309,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
+@@ -1071,7 +1310,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
ENTRY(mcount)
ret
@@ -23103,7 +23153,7 @@ index a2a4f46..6cab058 100644
ENTRY(ftrace_caller)
cmpl $0, function_trace_stop
-@@ -1103,7 +1342,7 @@ ftrace_graph_call:
+@@ -1104,7 +1343,7 @@ ftrace_graph_call:
.globl ftrace_stub
ftrace_stub:
ret
@@ -23112,7 +23162,7 @@ index a2a4f46..6cab058 100644
ENTRY(ftrace_regs_caller)
pushf /* push flags before compare (in cs location) */
-@@ -1207,7 +1446,7 @@ trace:
+@@ -1208,7 +1447,7 @@ trace:
popl %ecx
popl %eax
jmp ftrace_stub
@@ -23121,7 +23171,7 @@ index a2a4f46..6cab058 100644
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
-@@ -1225,7 +1464,7 @@ ENTRY(ftrace_graph_caller)
+@@ -1226,7 +1465,7 @@ ENTRY(ftrace_graph_caller)
popl %ecx
popl %eax
ret
@@ -23130,7 +23180,7 @@ index a2a4f46..6cab058 100644
.globl return_to_handler
return_to_handler:
-@@ -1291,15 +1530,18 @@ error_code:
+@@ -1292,15 +1531,18 @@ error_code:
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx
@@ -23151,7 +23201,7 @@ index a2a4f46..6cab058 100644
/*
* Debug traps and NMI can happen at the one SYSENTER instruction
-@@ -1342,7 +1584,7 @@ debug_stack_correct:
+@@ -1343,7 +1585,7 @@ debug_stack_correct:
call do_debug
jmp ret_from_exception
CFI_ENDPROC
@@ -23160,7 +23210,7 @@ index a2a4f46..6cab058 100644
/*
* NMI is doubly nasty. It can happen _while_ we're handling
-@@ -1380,6 +1622,9 @@ nmi_stack_correct:
+@@ -1381,6 +1623,9 @@ nmi_stack_correct:
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_nmi
@@ -23170,7 +23220,7 @@ index a2a4f46..6cab058 100644
jmp restore_all_notrace
CFI_ENDPROC
-@@ -1416,12 +1661,15 @@ nmi_espfix_stack:
+@@ -1417,12 +1662,15 @@ nmi_espfix_stack:
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx,%edx # zero error code
call do_nmi
@@ -23187,7 +23237,7 @@ index a2a4f46..6cab058 100644
ENTRY(int3)
RING0_INT_FRAME
-@@ -1434,14 +1682,14 @@ ENTRY(int3)
+@@ -1435,14 +1683,14 @@ ENTRY(int3)
call do_int3
jmp ret_from_exception
CFI_ENDPROC
@@ -23204,7 +23254,7 @@ index a2a4f46..6cab058 100644
#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
-@@ -1450,7 +1698,7 @@ ENTRY(async_page_fault)
+@@ -1451,7 +1699,7 @@ ENTRY(async_page_fault)
pushl_cfi $do_async_page_fault
jmp error_code
CFI_ENDPROC
@@ -25939,7 +25989,7 @@ index 898160b..758cde8 100644
reset_current_kprobe();
preempt_enable_no_resched();
diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
-index c2bedae..25e7ab6 100644
+index c2bedae..25e7ab60 100644
--- a/arch/x86/kernel/ksysfs.c
+++ b/arch/x86/kernel/ksysfs.c
@@ -184,7 +184,7 @@ out:
@@ -26814,7 +26864,7 @@ index 9c0280f..5bbb1c0 100644
ip = *(u64 *)(fp+8);
if (!in_sched_functions(ip))
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
-index 7461f50..1334029 100644
+index 7461f50..01d0b9c 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
@@ -26836,7 +26886,28 @@ index 7461f50..1334029 100644
return (unsigned long)regs;
}
-@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
+@@ -452,6 +451,20 @@ static int putreg(struct task_struct *child,
+ if (child->thread.gs != value)
+ return do_arch_prctl(child, ARCH_SET_GS, value);
+ return 0;
++
++ case offsetof(struct user_regs_struct,ip):
++ /*
++ * Protect against any attempt to set ip to an
++ * impossible address. There are dragons lurking if the
++ * address is noncanonical. (This explicitly allows
++ * setting ip to TASK_SIZE_MAX, because user code can do
++ * that all by itself by running off the end of its
++ * address space.
++ */
++ if (value > TASK_SIZE_MAX)
++ return -EIO;
++ break;
++
+ #endif
+ }
+
+@@ -588,7 +601,7 @@ static void ptrace_triggered(struct perf_event *bp,
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
int i;
@@ -26845,7 +26916,7 @@ index 7461f50..1334029 100644
struct arch_hw_breakpoint *info;
for (i = 0; i < HBP_NUM; i++) {
-@@ -822,7 +821,7 @@ long arch_ptrace(struct task_struct *child, long request,
+@@ -822,7 +835,7 @@ long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
@@ -26854,7 +26925,7 @@ index 7461f50..1334029 100644
switch (request) {
/* read the word at location addr in the USER area. */
-@@ -907,14 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
+@@ -907,14 +920,14 @@ long arch_ptrace(struct task_struct *child, long request,
if ((int) addr < 0)
return -EIO;
ret = do_get_thread_area(child, addr,
@@ -26871,7 +26942,7 @@ index 7461f50..1334029 100644
break;
#endif
-@@ -1292,7 +1291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+@@ -1292,7 +1305,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
#ifdef CONFIG_X86_64
@@ -26880,7 +26951,7 @@ index 7461f50..1334029 100644
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(struct user_regs_struct) / sizeof(long),
-@@ -1333,7 +1332,7 @@ static const struct user_regset_view user_x86_64_view = {
+@@ -1333,7 +1346,7 @@ static const struct user_regset_view user_x86_64_view = {
#endif /* CONFIG_X86_64 */
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
@@ -26889,7 +26960,7 @@ index 7461f50..1334029 100644
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(struct user_regs_struct32) / sizeof(u32),
-@@ -1386,7 +1385,7 @@ static const struct user_regset_view user_x86_32_view = {
+@@ -1386,7 +1399,7 @@ static const struct user_regset_view user_x86_32_view = {
*/
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
@@ -26898,7 +26969,7 @@ index 7461f50..1334029 100644
{
#ifdef CONFIG_X86_64
x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
-@@ -1421,7 +1420,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
+@@ -1421,7 +1434,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
memset(info, 0, sizeof(*info));
info->si_signo = SIGTRAP;
info->si_code = si_code;
@@ -26907,7 +26978,7 @@ index 7461f50..1334029 100644
}
void user_single_step_siginfo(struct task_struct *tsk,
-@@ -1450,6 +1449,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+@@ -1450,6 +1463,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
# define IS_IA32 0
#endif
@@ -26918,7 +26989,7 @@ index 7461f50..1334029 100644
/*
* We must return the syscall number to actually look up in the table.
* This can be -1L to skip running any syscall at all.
-@@ -1460,6 +1463,11 @@ long syscall_trace_enter(struct pt_regs *regs)
+@@ -1460,6 +1477,11 @@ long syscall_trace_enter(struct pt_regs *regs)
user_exit();
@@ -26930,7 +27001,7 @@ index 7461f50..1334029 100644
/*
* If we stepped into a sysenter/syscall insn, it trapped in
* kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
-@@ -1515,6 +1523,11 @@ void syscall_trace_leave(struct pt_regs *regs)
+@@ -1515,6 +1537,11 @@ void syscall_trace_leave(struct pt_regs *regs)
*/
user_exit();
@@ -28671,7 +28742,7 @@ index c697625..a032162 100644
out:
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 9736529..ab4f54c 100644
+index 0069118..c28ec0a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -55,7 +55,7 @@
@@ -32565,10 +32636,10 @@ index 4500142..53a363c 100644
return (void *)vaddr;
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
-index 8c9f647..57cb402 100644
+index 8b977eb..4732c33 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
-@@ -90,23 +90,24 @@ int pmd_huge_support(void)
+@@ -80,23 +80,24 @@ int pud_huge(pud_t pud)
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len,
@@ -32596,7 +32667,7 @@ index 8c9f647..57cb402 100644
{
struct hstate *h = hstate_file(file);
struct vm_unmapped_area_info info;
-@@ -118,6 +119,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -108,6 +109,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
info.high_limit = current->mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
@@ -32604,7 +32675,7 @@ index 8c9f647..57cb402 100644
addr = vm_unmapped_area(&info);
/*
-@@ -130,6 +132,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -120,6 +122,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
@@ -32617,7 +32688,7 @@ index 8c9f647..57cb402 100644
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
-@@ -144,10 +152,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -134,10 +142,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -32639,7 +32710,7 @@ index 8c9f647..57cb402 100644
return -ENOMEM;
if (flags & MAP_FIXED) {
-@@ -156,19 +174,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -146,19 +164,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return addr;
}
@@ -40383,7 +40454,9 @@ index 3b7d32d..05c2f74 100644
ret = -EFAULT;
goto done;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
-index 345be03..158368d 100644
+old mode 100644
+new mode 100755
+index 345be03..65b66c0
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -233,7 +233,7 @@ module_exit(drm_core_exit);
@@ -40404,6 +40477,17 @@ index 345be03..158368d 100644
unsigned int nr = DRM_IOCTL_NR(cmd);
int retcode = -EINVAL;
char stack_kdata[128];
+@@ -380,8 +380,9 @@ long drm_ioctl(struct file *filp,
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+- } else
++ } else if (cmd & IOC_OUT) {
+ memset(kdata, 0, usize);
++ }
+
+ if (ioctl->flags & DRM_UNLOCKED)
+ retcode = func(dev, kdata, file_priv);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7f2af9a..1561914 100644
--- a/drivers/gpu/drm/drm_fops.c
@@ -41959,10 +42043,10 @@ index ec0ae2d..dc0780b 100644
/* copy over all the bus versions */
if (dev->bus && dev->bus->pm) {
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
-index 8a5384c..cf63c18 100644
+index 7cd42ea..a367c48 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
-@@ -2422,7 +2422,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
+@@ -2432,7 +2432,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
int hid_add_device(struct hid_device *hdev)
{
@@ -41971,7 +42055,7 @@ index 8a5384c..cf63c18 100644
int ret;
if (WARN_ON(hdev->status & HID_STAT_ADDED))
-@@ -2456,7 +2456,7 @@ int hid_add_device(struct hid_device *hdev)
+@@ -2466,7 +2466,7 @@ int hid_add_device(struct hid_device *hdev)
/* XXX hack, any other cleaner solution after the driver core
* is converted to allow more than 20 bytes as the device name? */
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
@@ -42062,10 +42146,10 @@ index bcb4950..61dba6c 100644
if (!virtaddr)
goto cleanup;
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
-index 7e17a54..a50a33d 100644
+index 393fd8a..079e13f 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
-@@ -464,7 +464,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
+@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
@@ -42074,7 +42158,7 @@ index 7e17a54..a50a33d 100644
static int dm_ring_size = (5 * PAGE_SIZE);
-@@ -886,7 +886,7 @@ static void hot_add_req(struct work_struct *dummy)
+@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
pr_info("Memory hot add failed\n");
dm->state = DM_INITIALIZED;
@@ -42083,7 +42167,7 @@ index 7e17a54..a50a33d 100644
vmbus_sendpacket(dm->dev->channel, &resp,
sizeof(struct dm_hot_add_response),
(unsigned long)NULL,
-@@ -960,7 +960,7 @@ static void post_status(struct hv_dynmem_device *dm)
+@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
memset(&status, 0, sizeof(struct dm_status));
status.hdr.type = DM_STATUS_REPORT;
status.hdr.size = sizeof(struct dm_status);
@@ -42092,7 +42176,7 @@ index 7e17a54..a50a33d 100644
/*
* The host expects the guest to report free memory.
-@@ -980,7 +980,7 @@ static void post_status(struct hv_dynmem_device *dm)
+@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
* send the status. This can happen if we were interrupted
* after we picked our transaction ID.
*/
@@ -42100,8 +42184,8 @@ index 7e17a54..a50a33d 100644
+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
return;
- vmbus_sendpacket(dm->dev->channel, &status,
-@@ -1108,7 +1108,7 @@ static void balloon_up(struct work_struct *dummy)
+ /*
+@@ -1129,7 +1129,7 @@ static void balloon_up(struct work_struct *dummy)
*/
do {
@@ -42110,7 +42194,7 @@ index 7e17a54..a50a33d 100644
ret = vmbus_sendpacket(dm_device.dev->channel,
bl_resp,
bl_resp->hdr.size,
-@@ -1152,7 +1152,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
+@@ -1175,7 +1175,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
memset(&resp, 0, sizeof(struct dm_unballoon_response));
resp.hdr.type = DM_UNBALLOON_RESPONSE;
@@ -42119,7 +42203,7 @@ index 7e17a54..a50a33d 100644
resp.hdr.size = sizeof(struct dm_unballoon_response);
vmbus_sendpacket(dm_device.dev->channel, &resp,
-@@ -1215,7 +1215,7 @@ static void version_resp(struct hv_dynmem_device *dm,
+@@ -1238,7 +1238,7 @@ static void version_resp(struct hv_dynmem_device *dm,
memset(&version_req, 0, sizeof(struct dm_version_request));
version_req.hdr.type = DM_VERSION_REQUEST;
version_req.hdr.size = sizeof(struct dm_version_request);
@@ -42128,7 +42212,7 @@ index 7e17a54..a50a33d 100644
version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
version_req.is_last_attempt = 1;
-@@ -1385,7 +1385,7 @@ static int balloon_probe(struct hv_device *dev,
+@@ -1408,7 +1408,7 @@ static int balloon_probe(struct hv_device *dev,
memset(&version_req, 0, sizeof(struct dm_version_request));
version_req.hdr.type = DM_VERSION_REQUEST;
version_req.hdr.size = sizeof(struct dm_version_request);
@@ -42137,7 +42221,7 @@ index 7e17a54..a50a33d 100644
version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
version_req.is_last_attempt = 0;
-@@ -1416,7 +1416,7 @@ static int balloon_probe(struct hv_device *dev,
+@@ -1439,7 +1439,7 @@ static int balloon_probe(struct hv_device *dev,
memset(&cap_msg, 0, sizeof(struct dm_capabilities));
cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
cap_msg.hdr.size = sizeof(struct dm_capabilities);
@@ -46345,6 +46429,20 @@ index 455d4c3..3353ee7 100644
}
if (!request_mem_region(mem->start, mem_size, pdev->name)) {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index dbcff50..5ed5124 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -793,7 +793,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+
+ return;
+ }
+- bnx2x_frag_free(fp, new_data);
++ if (new_data)
++ bnx2x_frag_free(fp, new_data);
+ drop:
+ /* drop the packet and keep the buffer in the bin */
+ DP(NETIF_MSG_RX_STATUS,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a89a40f..5a8a2ac 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -46612,18 +46710,6 @@ index be7d7a6..a8983f8 100644
break;
default:
dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
-diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
-index 7d4f549..3e46c89 100644
---- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
-+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
-@@ -1022,6 +1022,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
- struct qlcnic_dcb_cee *peer;
- int i;
-
-+ memset(info, 0, sizeof(*info));
- *app_count = 0;
-
- if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 7763962..c3499a7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -46745,7 +46831,7 @@ index bf0d55e..82bcfbd1 100644
priv = netdev_priv(dev);
priv->phy = phy;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
-index 20bb669..9a0e17e 100644
+index 5adecc5..aec7730 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -991,13 +991,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
@@ -46771,7 +46857,7 @@ index 20bb669..9a0e17e 100644
return rtnl_link_register(ops);
};
-@@ -1052,7 +1054,7 @@ static int macvlan_device_event(struct notifier_block *unused,
+@@ -1051,7 +1053,7 @@ static int macvlan_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -46850,10 +46936,10 @@ index 1252d9c..80e660b 100644
/* We've got a compressed packet; read the change byte */
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
-index c8624a8..f0a4f6a 100644
+index 26d8c29..bbc6837 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
-@@ -2869,7 +2869,7 @@ static int team_device_event(struct notifier_block *unused,
+@@ -2874,7 +2874,7 @@ static int team_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -47011,11 +47097,24 @@ index a2515887..6d13233 100644
dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
/* we will have to manufacture ethernet headers, prepare template */
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 841b608..198a8b7 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -47,7 +47,7 @@ module_param(gso, bool, 0444);
+ #define RECEIVE_AVG_WEIGHT 64
+
+ /* Minimum alignment for mergeable packet buffers. */
+-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
++#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
+
+ #define VIRTNET_DRIVER_VERSION "1.0.0"
+
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
-index d091e52..568bb179 100644
+index 40ad25d..8703023 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
-@@ -2847,7 +2847,7 @@ nla_put_failure:
+@@ -2846,7 +2846,7 @@ nla_put_failure:
return -EMSGSIZE;
}
@@ -47024,7 +47123,7 @@ index d091e52..568bb179 100644
.kind = "vxlan",
.maxtype = IFLA_VXLAN_MAX,
.policy = vxlan_policy,
-@@ -2894,7 +2894,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
+@@ -2893,7 +2893,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -50821,10 +50920,10 @@ index 24884ca..26c8220 100644
login->tgt_agt = sbp_target_agent_register(login);
if (IS_ERR(login->tgt_agt)) {
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
-index 26416c1..e796a3d 100644
+index 6ea95d2..88607b4 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
-@@ -1524,7 +1524,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+@@ -1525,7 +1525,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
sema_init(&dev->caw_sem, 1);
@@ -50834,10 +50933,10 @@ index 26416c1..e796a3d 100644
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index 98b48d4..f4297e5 100644
+index 24f5279..046edc5 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
-@@ -1137,7 +1137,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
+@@ -1154,7 +1154,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
* Used to determine when ORDERED commands should go from
* Dormant to Active status.
*/
@@ -52579,7 +52678,7 @@ index 4d11449..f4ccabf 100644
INIT_LIST_HEAD(&dev->ep0.urb_list);
dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
-index 2da0a5a..4870e09 100644
+index 09e9619..d266724 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -532,8 +532,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
@@ -56483,7 +56582,7 @@ index ce25d75..dc09eeb 100644
&data);
if (!inode) {
diff --git a/fs/aio.c b/fs/aio.c
-index 04cd768..25949c1 100644
+index 19e7d95..af5756a 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -375,7 +375,7 @@ static int aio_setup_ring(struct kioctx *ctx)
@@ -57786,10 +57885,10 @@ index d04db81..96e54f1 100644
wake_up(&root->fs_info->transaction_wait);
wake_up(&root->fs_info->transaction_blocked_wait);
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
-index 865f4cf..f321e86 100644
+index ff286f3..8153a14 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
-@@ -436,7 +436,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
+@@ -437,7 +437,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
for (set = 0; set < FEAT_MAX; set++) {
int i;
struct attribute *attrs[2];
@@ -58436,10 +58535,10 @@ index 35ddc3e..563e809 100644
}
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
-index 8603447..f9caeee 100644
+index 049a3f2..0f41305 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
-@@ -2094,8 +2094,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+@@ -2099,8 +2099,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
default:
cifs_dbg(VFS, "info level %u isn't supported\n",
srch_inf->info_level);
@@ -59775,7 +59874,7 @@ index 6ea7b14..8fa16d9 100644
if (free_clusters >= (nclusters + dirty_clusters +
resv_clusters))
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 3a603a8..9b868ba 100644
+index 62f024c..a6a1a61 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1269,19 +1269,19 @@ struct ext4_sb_info {
@@ -59808,8 +59907,65 @@ index 3a603a8..9b868ba 100644
atomic_t s_lock_busy;
/* locality groups */
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 594009f..c30cbe2 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -389,7 +389,13 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
+ return 0;
+ failed:
+ for (; i >= 0; i--) {
+- if (i != indirect_blks && branch[i].bh)
++ /*
++ * We want to ext4_forget() only freshly allocated indirect
++ * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and
++ * buffer at branch[0].bh is indirect block / inode already
++ * existing before ext4_alloc_branch() was called.
++ */
++ if (i > 0 && i != indirect_blks && branch[i].bh)
+ ext4_forget(handle, 1, inode, branch[i].bh,
+ branch[i].bh->b_blocknr);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i],
+@@ -1312,16 +1318,24 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
+ blk = *i_data;
+ if (level > 0) {
+ ext4_lblk_t first2;
++ ext4_lblk_t count2;
++
+ bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
+ if (!bh) {
+ EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
+ "Read failure");
+ return -EIO;
+ }
+- first2 = (first > offset) ? first - offset : 0;
++ if (first > offset) {
++ first2 = first - offset;
++ count2 = count;
++ } else {
++ first2 = 0;
++ count2 = count - (offset - first);
++ }
+ ret = free_hole_blocks(handle, inode, bh,
+ (__le32 *)bh->b_data, level - 1,
+- first2, count - offset,
++ first2, count2,
+ inode->i_sb->s_blocksize >> 2);
+ if (ret) {
+ brelse(bh);
+@@ -1331,8 +1345,8 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
+ if (level == 0 ||
+ (bh && all_zeroes((__le32 *)bh->b_data,
+ (__le32 *)bh->b_data + addr_per_block))) {
+- ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
+- *i_data = 0;
++ ext4_free_data(handle, inode, parent_bh,
++ i_data, i_data + 1);
+ }
+ brelse(bh);
+ bh = NULL;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
-index 04a5c75..09894fa 100644
+index 08ddfda..a48f3f6 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1880,7 +1880,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
@@ -61696,11 +61852,15 @@ index d19b30a..ef89c36 100644
static int can_do_hugetlb_shm(void)
{
diff --git a/fs/inode.c b/fs/inode.c
-index e846a32..6b22e15 100644
+index e846a32..bb06bd0 100644
--- a/fs/inode.c
+++ b/fs/inode.c
-@@ -841,8 +841,8 @@ unsigned int get_next_ino(void)
+@@ -839,16 +839,20 @@ unsigned int get_next_ino(void)
+ unsigned int *p = &get_cpu_var(last_ino);
+ unsigned int res = *p;
++start:
++
#ifdef CONFIG_SMP
if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
- static atomic_t shared_last_ino;
@@ -61710,6 +61870,15 @@ index e846a32..6b22e15 100644
res = next - LAST_INO_BATCH;
}
+ #endif
+
+- *p = ++res;
++ if (unlikely(!++res))
++ goto start; /* never zero */
++ *p = res;
+ put_cpu_var(last_ino);
+ return res;
+ }
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 4a6cf28..d3a29d3 100644
--- a/fs/jffs2/erase.c
@@ -62648,7 +62817,7 @@ index f4ccfe6..a5cf064 100644
static struct callback_op callback_ops[];
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
-index 360114a..ac6e265 100644
+index 15f9d98..082c625 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1189,16 +1189,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
@@ -62685,7 +62854,7 @@ index 9a914e8..e89c0ea 100644
static struct nfsd4_operation nfsd4_ops[];
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
-index 16e8fa7..b0803f6 100644
+index bc11bf6..324b058 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1531,7 +1531,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
@@ -80782,7 +80951,7 @@ index c45c089..298841c 100644
u32 remainder;
return div_u64_rem(dividend, divisor, &remainder);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
-index 5f1ea75..5125ac5 100644
+index 5bba088..7ad4ae7 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
@@ -80796,7 +80965,7 @@ index 5f1ea75..5125ac5 100644
static inline void mpol_get(struct mempolicy *pol)
{
-@@ -223,6 +227,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
+@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
}
#define vma_policy(vma) NULL
@@ -81137,10 +81306,10 @@ index c5d5278..f0b68c8 100644
}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
-index 9b61b9b..52147d6b 100644
+index e6800f0..d59674e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
-@@ -396,7 +396,7 @@ struct zone {
+@@ -400,7 +400,7 @@ struct zone {
unsigned long flags; /* zone flags, see below */
/* Zone statistics */
@@ -81150,18 +81319,9 @@ index 9b61b9b..52147d6b 100644
/*
* The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
-index 45e9214..a7227d6 100644
+index 45e9214..4a547ac 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
-@@ -13,7 +13,7 @@
- typedef unsigned long kernel_ulong_t;
- #endif
-
--#define PCI_ANY_ID (~0)
-+#define PCI_ANY_ID ((__u16)~0)
-
- struct pci_device_id {
- __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
@@ -139,7 +139,7 @@ struct usb_device_id {
#define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
#define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
@@ -81508,36 +81668,6 @@ index 0000000..33f4af8
+};
+
+#endif
-diff --git a/include/linux/netlink.h b/include/linux/netlink.h
-index aad8eea..034cda7 100644
---- a/include/linux/netlink.h
-+++ b/include/linux/netlink.h
-@@ -16,9 +16,10 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
- }
-
- enum netlink_skb_flags {
-- NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
-- NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
-- NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
-+ NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
-+ NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
-+ NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
-+ NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */
- };
-
- struct netlink_skb_parms {
-@@ -169,4 +170,11 @@ struct netlink_tap {
- extern int netlink_add_tap(struct netlink_tap *nt);
- extern int netlink_remove_tap(struct netlink_tap *nt);
-
-+bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
-+ struct user_namespace *ns, int cap);
-+bool netlink_ns_capable(const struct sk_buff *skb,
-+ struct user_namespace *ns, int cap);
-+bool netlink_capable(const struct sk_buff *skb, int cap);
-+bool netlink_net_capable(const struct sk_buff *skb, int cap);
-+
- #endif /* __LINUX_NETLINK_H */
diff --git a/include/linux/nls.h b/include/linux/nls.h
index 520681b..1d67ed2 100644
--- a/include/linux/nls.h
@@ -82226,8 +82356,33 @@ index b66c211..13d2915 100644
static inline void anon_vma_merge(struct vm_area_struct *vma,
struct vm_area_struct *next)
+diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
+index a964f72..b475afb 100644
+--- a/include/linux/scatterlist.h
++++ b/include/linux/scatterlist.h
+@@ -1,6 +1,7 @@
+ #ifndef _LINUX_SCATTERLIST_H
+ #define _LINUX_SCATTERLIST_H
+
++#include <linux/sched.h>
+ #include <linux/string.h>
+ #include <linux/bug.h>
+ #include <linux/mm.h>
+@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
+ #ifdef CONFIG_DEBUG_SG
+ BUG_ON(!virt_addr_valid(buf));
+ #endif
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ if (object_starts_on_stack(buf)) {
++ void *adjbuf = buf - current->stack + current->lowmem_stack;
++ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
++ } else
++#endif
+ sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
+ }
+
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index ccd0c6f..39c28a4 100644
+index ccd0c6f..84d9030 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -129,6 +129,7 @@ struct fs_struct;
@@ -82318,7 +82473,17 @@ index ccd0c6f..39c28a4 100644
extern int uids_sysfs_init(void);
-@@ -1286,8 +1319,8 @@ struct task_struct {
+@@ -1164,6 +1197,9 @@ enum perf_event_task_context {
+ struct task_struct {
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ void *stack;
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ void *lowmem_stack;
++#endif
+ atomic_t usage;
+ unsigned int flags; /* per process flags, defined below */
+ unsigned int ptrace;
+@@ -1286,8 +1322,8 @@ struct task_struct {
struct list_head thread_node;
struct completion *vfork_done; /* for vfork() */
@@ -82329,7 +82494,7 @@ index ccd0c6f..39c28a4 100644
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
-@@ -1312,11 +1345,6 @@ struct task_struct {
+@@ -1312,11 +1348,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -82341,7 +82506,7 @@ index ccd0c6f..39c28a4 100644
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
-@@ -1333,6 +1361,10 @@ struct task_struct {
+@@ -1333,6 +1364,10 @@ struct task_struct {
#endif
/* CPU-specific state of this task */
struct thread_struct thread;
@@ -82352,7 +82517,7 @@ index ccd0c6f..39c28a4 100644
/* filesystem information */
struct fs_struct *fs;
/* open file information */
-@@ -1409,6 +1441,10 @@ struct task_struct {
+@@ -1409,6 +1444,10 @@ struct task_struct {
gfp_t lockdep_reclaim_gfp;
#endif
@@ -82363,7 +82528,7 @@ index ccd0c6f..39c28a4 100644
/* journalling filesystem info */
void *journal_info;
-@@ -1447,6 +1483,10 @@ struct task_struct {
+@@ -1447,6 +1486,10 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
@@ -82374,7 +82539,7 @@ index ccd0c6f..39c28a4 100644
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
-@@ -1581,7 +1621,78 @@ struct task_struct {
+@@ -1581,7 +1624,78 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -82454,7 +82619,7 @@ index ccd0c6f..39c28a4 100644
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-@@ -1658,7 +1769,7 @@ struct pid_namespace;
+@@ -1658,7 +1772,7 @@ struct pid_namespace;
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
struct pid_namespace *ns);
@@ -82463,7 +82628,7 @@ index ccd0c6f..39c28a4 100644
{
return tsk->pid;
}
-@@ -2006,6 +2117,25 @@ extern u64 sched_clock_cpu(int cpu);
+@@ -2006,6 +2120,25 @@ extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_init(void);
@@ -82489,7 +82654,7 @@ index ccd0c6f..39c28a4 100644
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
-@@ -2130,7 +2260,9 @@ void yield(void);
+@@ -2130,7 +2263,9 @@ void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
@@ -82499,7 +82664,7 @@ index ccd0c6f..39c28a4 100644
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
-@@ -2163,6 +2295,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2163,6 +2298,7 @@ extern struct pid_namespace init_pid_ns;
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -82507,7 +82672,7 @@ index ccd0c6f..39c28a4 100644
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
-@@ -2325,7 +2458,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2325,7 +2461,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -82516,12 +82681,12 @@ index ccd0c6f..39c28a4 100644
extern int allow_signal(int);
extern int disallow_signal(int);
-@@ -2526,9 +2659,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2526,9 +2662,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#endif
-static inline int object_is_on_stack(void *obj)
-+static inline int object_starts_on_stack(void *obj)
++static inline int object_starts_on_stack(const void *obj)
{
- void *stack = task_stack_page(current);
+ const void *stack = task_stack_page(current);
@@ -82850,7 +83015,7 @@ index 6ae004e..2743532 100644
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
-index 302ab80..3233276 100644
+index 46cca4c..3323536 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -11,7 +11,7 @@ struct sock;
@@ -83470,7 +83635,7 @@ index 502073a..a7de024 100644
#endif
#endif /* _LINUX_VGA_SWITCHEROO_H_ */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
-index 4b8a891..05f2361 100644
+index 4b8a891..e9a2863 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
@@ -83485,15 +83650,18 @@ index 4b8a891..05f2361 100644
/* bits [20..32] reserved for arch specific ioremap internals */
/*
-@@ -72,6 +77,7 @@ extern void *vzalloc_node(unsigned long size, int node);
- extern void *vmalloc_exec(unsigned long size);
- extern void *vmalloc_32(unsigned long size);
- extern void *vmalloc_32_user(unsigned long size);
-+extern void *vmalloc_stack(int node);
- extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
- extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
- unsigned long start, unsigned long end, gfp_t gfp_mask,
-@@ -142,7 +148,7 @@ extern void free_vm_area(struct vm_struct *area);
+@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
+ unsigned long flags, pgprot_t prot);
+ extern void vunmap(const void *addr);
+
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++extern void unmap_process_stacks(struct task_struct *task);
++#endif
++
+ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
+ unsigned long uaddr, void *kaddr,
+ unsigned long size);
+@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
@@ -83759,7 +83927,7 @@ index c55aeed..b3393f4 100644
/** inet_connection_sock - INET connection oriented sock
*
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
-index 6efe73c..1a44af7 100644
+index 058271b..1a44af7 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -47,8 +47,8 @@ struct inet_peer {
@@ -83773,20 +83941,11 @@ index 6efe73c..1a44af7 100644
};
struct rcu_head rcu;
struct inet_peer *gc_next;
-@@ -177,16 +177,9 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
- /* can be called with or without local BH being disabled */
- static inline int inet_getid(struct inet_peer *p, int more)
+@@ -179,7 +179,7 @@ static inline int inet_getid(struct inet_peer *p, int more)
{
-- int old, new;
more++;
inet_peer_refcheck(p);
-- do {
-- old = atomic_read(&p->ip_id_count);
-- new = old + more;
-- if (!new)
-- new = 1;
-- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
-- return new;
+- return atomic_add_return(more, &p->ip_id_count) - more;
+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
}
@@ -84254,7 +84413,7 @@ index 0dfcc92..7967849 100644
/* Structure to track chunk fragments that have been acked, but peer
diff --git a/include/net/sock.h b/include/net/sock.h
-index b9586a1..b2948c0 100644
+index 57c31dd..f5e5196 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -348,7 +348,7 @@ struct sock {
@@ -84293,6 +84452,17 @@ index b9586a1..b2948c0 100644
static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
+@@ -1755,8 +1755,8 @@ sk_dst_get(struct sock *sk)
+
+ rcu_read_lock();
+ dst = rcu_dereference(sk->sk_dst_cache);
+- if (dst)
+- dst_hold(dst);
++ if (dst && !atomic_inc_not_zero(&dst->__refcnt))
++ dst = NULL;
+ rcu_read_unlock();
+ return dst;
+ }
@@ -1830,7 +1830,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
}
@@ -85745,7 +85915,7 @@ index 8d6e145..33e0b1e 100644
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
set_fs(fs);
diff --git a/kernel/audit.c b/kernel/audit.c
-index d5f31c1..06646e1 100644
+index 0c9dc86..a891393 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
@@ -86582,49 +86752,112 @@ index 81b3d67..ef189a4 100644
{
struct signal_struct *sig = current->signal;
diff --git a/kernel/fork.c b/kernel/fork.c
-index a17621c..d9e4b37 100644
+index c44bff8..a3c5876 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -137,6 +137,18 @@ void __weak arch_release_thread_info(struct thread_info *ti)
- {
- }
+@@ -180,6 +180,48 @@ void thread_info_cache_init(void)
+ # endif
+ #endif
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
-+ int node)
++static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
++ int node, void **lowmem_stack)
+{
-+ return vmalloc_stack(node);
++ struct page *pages[THREAD_SIZE / PAGE_SIZE];
++ void *ret = NULL;
++ unsigned int i;
++
++ *lowmem_stack = alloc_thread_info_node(tsk, node);
++ if (*lowmem_stack == NULL)
++ goto out;
++
++ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
++ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
++
++ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
++ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
++ if (ret == NULL) {
++ free_thread_info(*lowmem_stack);
++ *lowmem_stack = NULL;
++ }
++
++out:
++ return ret;
+}
+
-+static inline void free_thread_info(struct thread_info *ti)
++static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
+{
-+ vfree(ti);
++ unmap_process_stacks(tsk);
+}
+#else
- #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
-
- /*
-@@ -179,6 +191,7 @@ void thread_info_cache_init(void)
- }
- # endif
- #endif
++static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
++ int node, void **lowmem_stack)
++{
++ return alloc_thread_info_node(tsk, node);
++}
++static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
++{
++ free_thread_info(ti);
++}
+#endif
-
++
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;
-@@ -200,9 +213,11 @@ static struct kmem_cache *mm_cachep;
- static void account_kernel_stack(struct thread_info *ti, int account)
+@@ -198,18 +240,22 @@ struct kmem_cache *vm_area_cachep;
+ /* SLAB cache for mm_struct structures (tsk->mm) */
+ static struct kmem_cache *mm_cachep;
+
+-static void account_kernel_stack(struct thread_info *ti, int account)
++static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
{
-+#ifndef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
++#else
struct zone *zone = page_zone(virt_to_page(ti));
++#endif
mod_zone_page_state(zone, NR_KERNEL_STACK, account);
-+#endif
}
void free_task(struct task_struct *tsk)
-@@ -319,7 +334,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+ {
+- account_kernel_stack(tsk->stack, -1);
++ account_kernel_stack(tsk, tsk->stack, -1);
+ arch_release_thread_info(tsk->stack);
+- free_thread_info(tsk->stack);
++ gr_free_thread_info(tsk, tsk->stack);
+ rt_mutex_debug_task_free(tsk);
+ ftrace_graph_exit_task(tsk);
+ put_seccomp_filter(tsk);
+@@ -295,6 +341,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+ struct task_struct *tsk;
+ struct thread_info *ti;
+ unsigned long *stackend;
++ void *lowmem_stack;
+ int node = tsk_fork_get_node(orig);
+ int err;
+
+@@ -302,7 +349,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+ if (!tsk)
+ return NULL;
+
+- ti = alloc_thread_info_node(tsk, node);
++ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
+ if (!ti)
+ goto free_tsk;
+
+@@ -311,6 +358,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+ goto free_ti;
+
+ tsk->stack = ti;
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ tsk->lowmem_stack = lowmem_stack;
++#endif
+
+ setup_thread_stack(tsk, orig);
+ clear_user_return_notifier(tsk);
+@@ -319,7 +369,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
*stackend = STACK_END_MAGIC; /* for overflow detection */
#ifdef CONFIG_CC_STACKPROTECTOR
@@ -86633,7 +86866,21 @@ index a17621c..d9e4b37 100644
#endif
/*
-@@ -345,12 +360,80 @@ free_tsk:
+@@ -333,24 +383,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+ tsk->splice_pipe = NULL;
+ tsk->task_frag.page = NULL;
+
+- account_kernel_stack(ti, 1);
++ account_kernel_stack(tsk, ti, 1);
+
+ return tsk;
+
+ free_ti:
+- free_thread_info(ti);
++ gr_free_thread_info(tsk, ti);
+ free_tsk:
+ free_task_struct(tsk);
+ return NULL;
}
#ifdef CONFIG_MMU
@@ -86716,7 +86963,7 @@ index a17621c..d9e4b37 100644
uprobe_start_dup_mmap();
down_write(&oldmm->mmap_sem);
-@@ -379,55 +462,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -379,55 +497,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
prev = NULL;
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
@@ -86776,7 +87023,7 @@ index a17621c..d9e4b37 100644
}
/*
-@@ -459,6 +502,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -459,6 +537,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (retval)
goto out;
}
@@ -86808,7 +87055,7 @@ index a17621c..d9e4b37 100644
/* a new mm has just been created */
arch_dup_mmap(oldmm, mm);
retval = 0;
-@@ -468,14 +536,6 @@ out:
+@@ -468,14 +571,6 @@ out:
up_write(&oldmm->mmap_sem);
uprobe_end_dup_mmap();
return retval;
@@ -86823,7 +87070,7 @@ index a17621c..d9e4b37 100644
}
static inline int mm_alloc_pgd(struct mm_struct *mm)
-@@ -689,8 +749,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+@@ -689,8 +784,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
return ERR_PTR(err);
mm = get_task_mm(task);
@@ -86834,7 +87081,7 @@ index a17621c..d9e4b37 100644
mmput(mm);
mm = ERR_PTR(-EACCES);
}
-@@ -906,13 +966,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
+@@ -906,13 +1001,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
spin_unlock(&fs->lock);
return -EAGAIN;
}
@@ -86856,7 +87103,7 @@ index a17621c..d9e4b37 100644
return 0;
}
-@@ -1130,7 +1197,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
+@@ -1130,7 +1232,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
@@ -86865,7 +87112,7 @@ index a17621c..d9e4b37 100644
unsigned long stack_start,
unsigned long stack_size,
int __user *child_tidptr,
-@@ -1202,6 +1269,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1202,6 +1304,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
@@ -86875,7 +87122,7 @@ index a17621c..d9e4b37 100644
if (atomic_read(&p->real_cred->user->processes) >=
task_rlimit(p, RLIMIT_NPROC)) {
if (p->real_cred->user != INIT_USER &&
-@@ -1449,6 +1519,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1449,6 +1554,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_free_pid;
}
@@ -86887,7 +87134,7 @@ index a17621c..d9e4b37 100644
if (likely(p->pid)) {
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
-@@ -1537,6 +1612,8 @@ bad_fork_cleanup_count:
+@@ -1539,6 +1649,8 @@ bad_fork_cleanup_count:
bad_fork_free:
free_task(p);
fork_out:
@@ -86896,7 +87143,7 @@ index a17621c..d9e4b37 100644
return ERR_PTR(retval);
}
-@@ -1598,6 +1675,7 @@ long do_fork(unsigned long clone_flags,
+@@ -1600,6 +1712,7 @@ long do_fork(unsigned long clone_flags,
p = copy_process(clone_flags, stack_start, stack_size,
child_tidptr, NULL, trace);
@@ -86904,7 +87151,7 @@ index a17621c..d9e4b37 100644
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
-@@ -1612,6 +1690,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1616,6 +1729,8 @@ long do_fork(unsigned long clone_flags,
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
@@ -86913,7 +87160,7 @@ index a17621c..d9e4b37 100644
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
-@@ -1728,7 +1808,7 @@ void __init proc_caches_init(void)
+@@ -1734,7 +1849,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -86922,7 +87169,7 @@ index a17621c..d9e4b37 100644
mmap_init();
nsproxy_cache_init();
}
-@@ -1768,7 +1848,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1774,7 +1889,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
@@ -86931,7 +87178,7 @@ index a17621c..d9e4b37 100644
return 0;
*new_fsp = copy_fs_struct(fs);
-@@ -1875,7 +1955,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1881,7 +1996,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
@@ -91451,10 +91698,10 @@ index fc4da2d..f3e800b 100644
*data_page = bpage;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 24c1f23..781fd73f 100644
+index f0831c22..4b19cb3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -3399,7 +3399,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+@@ -3400,7 +3400,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
return 0;
}
@@ -91644,7 +91891,7 @@ index 4f69f9a..7c6f8f8 100644
memcpy(&uts_table, table, sizeof(uts_table));
uts_table.data = get_uts(table, write);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
-index 4431610..4265616 100644
+index c9b6f01..37781d9 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -475,7 +475,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
@@ -92436,10 +92683,10 @@ index 0000000..7cd6065
@@ -0,0 +1 @@
+-grsec
diff --git a/mm/Kconfig b/mm/Kconfig
-index 2888024..c15a810 100644
+index 9b63c15..2ab509e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
-@@ -326,10 +326,11 @@ config KSM
+@@ -329,10 +329,11 @@ config KSM
root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
config DEFAULT_MMAP_MIN_ADDR
@@ -92454,7 +92701,7 @@ index 2888024..c15a810 100644
This is the portion of low virtual memory which should be protected
from userspace allocation. Keeping a user from writing to low pages
can help reduce the impact of kernel NULL pointer bugs.
-@@ -360,7 +361,7 @@ config MEMORY_FAILURE
+@@ -363,7 +364,7 @@ config MEMORY_FAILURE
config HWPOISON_INJECT
tristate "HWPoison pages injector"
@@ -92881,7 +93128,7 @@ index 539eeb9..e24a987 100644
if (end == start)
return error;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
-index e346fa9..5d32f0a 100644
+index 33365e9..2234ef9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -92902,7 +93149,7 @@ index e346fa9..5d32f0a 100644
#ifdef __ARCH_SI_TRAPNO
si.si_trapno = trapno;
#endif
-@@ -762,7 +762,7 @@ static struct page_state {
+@@ -795,7 +795,7 @@ static struct page_state {
unsigned long res;
char *msg;
int (*action)(struct page *p, unsigned long pfn);
@@ -92911,7 +93158,7 @@ index e346fa9..5d32f0a 100644
{ reserved, reserved, "reserved kernel", me_kernel },
/*
* free pages are specially detected outside this table:
-@@ -1062,7 +1062,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+@@ -1095,7 +1095,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
nr_pages = 1 << compound_order(hpage);
else /* normal page or thp */
nr_pages = 1;
@@ -92920,7 +93167,7 @@ index e346fa9..5d32f0a 100644
/*
* We need/can do nothing about count=0 pages.
-@@ -1091,7 +1091,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+@@ -1124,7 +1124,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
if (PageHWPoison(hpage)) {
if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
|| (p != hpage && TestSetPageHWPoison(hpage))) {
@@ -92929,7 +93176,7 @@ index e346fa9..5d32f0a 100644
unlock_page(hpage);
return 0;
}
-@@ -1157,14 +1157,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+@@ -1190,14 +1190,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
*/
if (!PageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
@@ -92946,7 +93193,7 @@ index e346fa9..5d32f0a 100644
unlock_page(hpage);
put_page(hpage);
return 0;
-@@ -1386,7 +1386,7 @@ int unpoison_memory(unsigned long pfn)
+@@ -1419,7 +1419,7 @@ int unpoison_memory(unsigned long pfn)
return 0;
}
if (TestClearPageHWPoison(p))
@@ -92955,7 +93202,7 @@ index e346fa9..5d32f0a 100644
pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
return 0;
}
-@@ -1400,7 +1400,7 @@ int unpoison_memory(unsigned long pfn)
+@@ -1433,7 +1433,7 @@ int unpoison_memory(unsigned long pfn)
*/
if (TestClearPageHWPoison(page)) {
pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
@@ -92964,7 +93211,7 @@ index e346fa9..5d32f0a 100644
freeit = 1;
if (PageHuge(page))
clear_page_hwpoison_huge_page(page);
-@@ -1525,11 +1525,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
+@@ -1558,11 +1558,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
if (PageHuge(page)) {
set_page_hwpoison_huge_page(hpage);
dequeue_hwpoisoned_huge_page(hpage);
@@ -92978,7 +93225,7 @@ index e346fa9..5d32f0a 100644
}
}
return ret;
-@@ -1568,7 +1568,7 @@ static int __soft_offline_page(struct page *page, int flags)
+@@ -1601,7 +1601,7 @@ static int __soft_offline_page(struct page *page, int flags)
put_page(page);
pr_info("soft_offline: %#lx: invalidated\n", pfn);
SetPageHWPoison(page);
@@ -92987,7 +93234,7 @@ index e346fa9..5d32f0a 100644
return 0;
}
-@@ -1619,7 +1619,7 @@ static int __soft_offline_page(struct page *page, int flags)
+@@ -1652,7 +1652,7 @@ static int __soft_offline_page(struct page *page, int flags)
if (!is_free_buddy_page(page))
pr_info("soft offline: %#lx: page leaked\n",
pfn);
@@ -92996,7 +93243,7 @@ index e346fa9..5d32f0a 100644
}
} else {
pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
-@@ -1693,11 +1693,11 @@ int soft_offline_page(struct page *page, int flags)
+@@ -1726,11 +1726,11 @@ int soft_offline_page(struct page *page, int flags)
if (PageHuge(page)) {
set_page_hwpoison_huge_page(hpage);
dequeue_hwpoisoned_huge_page(hpage);
@@ -95524,7 +95771,7 @@ index 8740213..f87e25b 100644
struct mm_struct *mm;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
-index 8f6daa6..1f8587c 100644
+index d013dba..d5ae30d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -685,7 +685,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
@@ -95537,7 +95784,7 @@ index 8f6daa6..1f8587c 100644
unsigned long bg_thresh,
unsigned long dirty,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 7387a67..67105e4 100644
+index 4b5d4f6..56dfb0a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
@@ -95642,7 +95889,7 @@ index 7387a67..67105e4 100644
}
}
-@@ -6565,4 +6605,4 @@ void dump_page(struct page *page, char *reason)
+@@ -6577,4 +6617,4 @@ void dump_page(struct page *page, char *reason)
{
dump_page_badflags(page, reason, 0);
}
@@ -95726,10 +95973,10 @@ index fd26d04..0cea1b0 100644
if (!mm || IS_ERR(mm)) {
rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
diff --git a/mm/rmap.c b/mm/rmap.c
-index 5d91bb7..3784601 100644
+index cdbd312..2e1e0b9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
-@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
struct anon_vma *anon_vma = vma->anon_vma;
struct anon_vma_chain *avc;
@@ -95740,7 +95987,7 @@ index 5d91bb7..3784601 100644
might_sleep();
if (unlikely(!anon_vma)) {
struct mm_struct *mm = vma->vm_mm;
-@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+@@ -173,6 +177,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
if (!avc)
goto out_enomem;
@@ -95753,7 +96000,7 @@ index 5d91bb7..3784601 100644
anon_vma = find_mergeable_anon_vma(vma);
allocated = NULL;
if (!anon_vma) {
-@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+@@ -186,6 +196,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
/* page_table_lock to protect against threads */
spin_lock(&mm->page_table_lock);
if (likely(!vma->anon_vma)) {
@@ -95772,7 +96019,7 @@ index 5d91bb7..3784601 100644
vma->anon_vma = anon_vma;
anon_vma_chain_link(vma, avc, anon_vma);
allocated = NULL;
-@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+@@ -196,12 +218,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
if (unlikely(allocated))
put_anon_vma(allocated);
@@ -95797,7 +96044,7 @@ index 5d91bb7..3784601 100644
anon_vma_chain_free(avc);
out_enomem:
return -ENOMEM;
-@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
+@@ -237,7 +271,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
* Attach the anon_vmas from src to dst.
* Returns 0 on success, -ENOMEM on failure.
*/
@@ -95806,7 +96053,7 @@ index 5d91bb7..3784601 100644
{
struct anon_vma_chain *avc, *pavc;
struct anon_vma *root = NULL;
-@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+@@ -270,7 +304,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
* the corresponding VMA in the parent process is attached to.
* Returns 0 on success, non-zero on failure.
*/
@@ -95815,7 +96062,7 @@ index 5d91bb7..3784601 100644
{
struct anon_vma_chain *avc;
struct anon_vma *anon_vma;
-@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
+@@ -374,8 +408,10 @@ static void anon_vma_ctor(void *data)
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
@@ -97107,10 +97354,65 @@ index a24aa22..a0d41ae 100644
}
#endif
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index 0fdf968..2183ba3 100644
+index 0fdf968..991ff6a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+@@ -38,6 +38,21 @@ struct vfree_deferred {
+ };
+ static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
+
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++struct stack_deferred_llist {
++ struct llist_head list;
++ void *stack;
++ void *lowmem_stack;
++};
++
++struct stack_deferred {
++ struct stack_deferred_llist list;
++ struct work_struct wq;
++};
++
++static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
++#endif
++
+ static void __vunmap(const void *, int);
+
+ static void free_work(struct work_struct *w)
+@@ -45,12 +60,30 @@ static void free_work(struct work_struct *w)
+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+ struct llist_node *llnode = llist_del_all(&p->list);
+ while (llnode) {
+- void *p = llnode;
++ void *x = llnode;
+ llnode = llist_next(llnode);
+- __vunmap(p, 1);
++ __vunmap(x, 1);
+ }
+ }
+
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++static void unmap_work(struct work_struct *w)
++{
++ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
++ struct llist_node *llnode = llist_del_all(&p->list.list);
++ while (llnode) {
++ struct stack_deferred_llist *x =
++ llist_entry((struct llist_head *)llnode,
++ struct stack_deferred_llist, list);
++ void *stack = ACCESS_ONCE(x->stack);
++ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
++ llnode = llist_next(llnode);
++ __vunmap(stack, 0);
++ free_memcg_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
++ }
++}
++#endif
++
+ /*** Page table manipulation functions ***/
+
+ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+@@ -59,8 +92,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
pte = pte_offset_kernel(pmd, addr);
do {
@@ -97132,7 +97434,7 @@ index 0fdf968..2183ba3 100644
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+@@ -120,16 +164,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
pte = pte_alloc_kernel(pmd, addr);
if (!pte)
return -ENOMEM;
@@ -97164,7 +97466,7 @@ index 0fdf968..2183ba3 100644
return 0;
}
-@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+@@ -139,7 +196,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
pmd_t *pmd;
unsigned long next;
@@ -97173,7 +97475,7 @@ index 0fdf968..2183ba3 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
+@@ -156,7 +213,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
pud_t *pud;
unsigned long next;
@@ -97182,7 +97484,7 @@ index 0fdf968..2183ba3 100644
if (!pud)
return -ENOMEM;
do {
-@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
+@@ -216,6 +273,12 @@ int is_vmalloc_or_module_addr(const void *x)
if (addr >= MODULES_VADDR && addr < MODULES_END)
return 1;
#endif
@@ -97195,7 +97497,7 @@ index 0fdf968..2183ba3 100644
return is_vmalloc_addr(x);
}
-@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
+@@ -236,8 +299,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
if (!pgd_none(*pgd)) {
pud_t *pud = pud_offset(pgd, addr);
@@ -97210,7 +97512,31 @@ index 0fdf968..2183ba3 100644
if (!pmd_none(*pmd)) {
pte_t *ptep, pte;
-@@ -1309,6 +1345,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+@@ -1175,13 +1244,23 @@ void __init vmalloc_init(void)
+ for_each_possible_cpu(i) {
+ struct vmap_block_queue *vbq;
+ struct vfree_deferred *p;
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ struct stack_deferred *p2;
++#endif
+
+ vbq = &per_cpu(vmap_block_queue, i);
+ spin_lock_init(&vbq->lock);
+ INIT_LIST_HEAD(&vbq->free);
++
+ p = &per_cpu(vfree_deferred, i);
+ init_llist_head(&p->list);
+ INIT_WORK(&p->wq, free_work);
++
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ p2 = &per_cpu(stack_deferred, i);
++ init_llist_head(&p2->list.list);
++ INIT_WORK(&p2->wq, unmap_work);
++#endif
+ }
+
+ /* Import existing vmlist entries. */
+@@ -1309,6 +1388,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
struct vm_struct *area;
BUG_ON(in_interrupt());
@@ -97227,7 +97553,31 @@ index 0fdf968..2183ba3 100644
if (flags & VM_IOREMAP)
align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
-@@ -1534,6 +1580,11 @@ void *vmap(struct page **pages, unsigned int count,
+@@ -1514,6 +1603,23 @@ void vunmap(const void *addr)
+ }
+ EXPORT_SYMBOL(vunmap);
+
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++void unmap_process_stacks(struct task_struct *task)
++{
++ if (unlikely(in_interrupt())) {
++ struct stack_deferred *p = &__get_cpu_var(stack_deferred);
++ struct stack_deferred_llist *list = task->stack;
++ list->stack = task->stack;
++ list->lowmem_stack = task->lowmem_stack;
++ if (llist_add((struct llist_node *)&list->list, &p->list.list))
++ schedule_work(&p->wq);
++ } else {
++ __vunmap(task->stack, 0);
++ free_memcg_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
++ }
++}
++#endif
++
+ /**
+ * vmap - map an array of pages into virtually contiguous space
+ * @pages: array of page pointers
+@@ -1534,6 +1640,11 @@ void *vmap(struct page **pages, unsigned int count,
if (count > totalram_pages)
return NULL;
@@ -97239,7 +97589,7 @@ index 0fdf968..2183ba3 100644
area = get_vm_area_caller((count << PAGE_SHIFT), flags,
__builtin_return_address(0));
if (!area)
-@@ -1634,6 +1685,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+@@ -1634,6 +1745,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
goto fail;
@@ -97253,20 +97603,7 @@ index 0fdf968..2183ba3 100644
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
start, end, node, gfp_mask, caller);
if (!area)
-@@ -1701,6 +1759,12 @@ static inline void *__vmalloc_node_flags(unsigned long size,
- node, __builtin_return_address(0));
- }
-
-+void *vmalloc_stack(int node)
-+{
-+ return __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP, PAGE_KERNEL,
-+ node, __builtin_return_address(0));
-+}
-+
- /**
- * vmalloc - allocate virtually contiguous memory
- * @size: allocation size
-@@ -1810,10 +1874,9 @@ EXPORT_SYMBOL(vzalloc_node);
+@@ -1810,10 +1928,9 @@ EXPORT_SYMBOL(vzalloc_node);
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/
@@ -97278,7 +97615,7 @@ index 0fdf968..2183ba3 100644
NUMA_NO_NODE, __builtin_return_address(0));
}
-@@ -2120,6 +2183,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+@@ -2120,6 +2237,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
{
struct vm_struct *area;
@@ -97287,7 +97624,7 @@ index 0fdf968..2183ba3 100644
size = PAGE_ALIGN(size);
if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
-@@ -2602,7 +2667,11 @@ static int s_show(struct seq_file *m, void *p)
+@@ -2602,7 +2721,11 @@ static int s_show(struct seq_file *m, void *p)
v->addr, v->addr + v->size, v->size);
if (v->caller)
@@ -97823,7 +98160,7 @@ index 6afa3b4..7a14180 100644
if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
rfc.mode != chan->mode)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
-index d58f76b..b69600a 100644
+index d4b7702..7122922 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -625,7 +625,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
@@ -98018,7 +98355,7 @@ index dcb75c0..24b1b43 100644
}
diff --git a/net/can/gw.c b/net/can/gw.c
-index ac31891..4799c17 100644
+index 050a211..bb9fe33 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
@@ -98237,7 +98574,7 @@ index a16ed7b..eb44d17 100644
return err;
diff --git a/net/core/dev.c b/net/core/dev.c
-index fccc195..c8486ab 100644
+index 4c1b483..3d45b13 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1688,14 +1688,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
@@ -98329,6 +98666,40 @@ index cf999e0..c59a975 100644
}
}
EXPORT_SYMBOL(dev_load);
+diff --git a/net/core/dst.c b/net/core/dst.c
+index ca4231e..15b6792 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -267,6 +267,15 @@ again:
+ }
+ EXPORT_SYMBOL(dst_destroy);
+
++static void dst_destroy_rcu(struct rcu_head *head)
++{
++ struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
++
++ dst = dst_destroy(dst);
++ if (dst)
++ __dst_free(dst);
++}
++
+ void dst_release(struct dst_entry *dst)
+ {
+ if (dst) {
+@@ -274,11 +283,8 @@ void dst_release(struct dst_entry *dst)
+
+ newrefcnt = atomic_dec_return(&dst->__refcnt);
+ WARN_ON(newrefcnt < 0);
+- if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
+- dst = dst_destroy(dst);
+- if (dst)
+- __dst_free(dst);
+- }
++ if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
++ call_rcu(&dst->rcu_head, dst_destroy_rcu);
+ }
+ }
+ EXPORT_SYMBOL(dst_release);
diff --git a/net/core/filter.c b/net/core/filter.c
index ebce437..9fed9d0 100644
--- a/net/core/filter.c
@@ -98626,7 +98997,7 @@ index fdac61c..e5e5b46 100644
pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
return -ENODEV;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index 83b9d6a..cff1ce7 100644
+index aef1500..4b61acd 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -58,7 +58,7 @@ struct rtnl_link {
@@ -98736,10 +99107,10 @@ index e5ae776e..15c90cb 100644
}
diff --git a/net/core/sock.c b/net/core/sock.c
-index c0fc6bd..51d8326 100644
+index c806956..e5599ea 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
-@@ -393,7 +393,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+@@ -442,7 +442,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
struct sk_buff_head *list = &sk->sk_receive_queue;
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
@@ -98748,7 +99119,7 @@ index c0fc6bd..51d8326 100644
trace_sock_rcvqueue_full(sk, skb);
return -ENOMEM;
}
-@@ -403,7 +403,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+@@ -452,7 +452,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return err;
if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
@@ -98757,7 +99128,7 @@ index c0fc6bd..51d8326 100644
return -ENOBUFS;
}
-@@ -423,7 +423,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+@@ -472,7 +472,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb_dst_force(skb);
spin_lock_irqsave(&list->lock, flags);
@@ -98766,7 +99137,7 @@ index c0fc6bd..51d8326 100644
__skb_queue_tail(list, skb);
spin_unlock_irqrestore(&list->lock, flags);
-@@ -443,7 +443,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+@@ -492,7 +492,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
skb->dev = NULL;
if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
@@ -98775,7 +99146,7 @@ index c0fc6bd..51d8326 100644
goto discard_and_relse;
}
if (nested)
-@@ -461,7 +461,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+@@ -510,7 +510,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
bh_unlock_sock(sk);
@@ -98784,7 +99155,7 @@ index c0fc6bd..51d8326 100644
goto discard_and_relse;
}
-@@ -949,12 +949,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+@@ -998,12 +998,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
struct timeval tm;
} v;
@@ -98800,7 +99171,7 @@ index c0fc6bd..51d8326 100644
return -EINVAL;
memset(&v, 0, sizeof(v));
-@@ -1106,11 +1106,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+@@ -1155,11 +1155,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
case SO_PEERNAME:
{
@@ -98814,7 +99185,7 @@ index c0fc6bd..51d8326 100644
return -EINVAL;
if (copy_to_user(optval, address, len))
return -EFAULT;
-@@ -1191,7 +1191,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+@@ -1240,7 +1240,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
if (len > lv)
len = lv;
@@ -98823,7 +99194,7 @@ index c0fc6bd..51d8326 100644
return -EFAULT;
lenout:
if (put_user(len, optlen))
-@@ -2326,7 +2326,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+@@ -2375,7 +2375,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
*/
smp_wmb();
atomic_set(&sk->sk_refcnt, 1);
@@ -98832,7 +99203,7 @@ index c0fc6bd..51d8326 100644
}
EXPORT_SYMBOL(sock_init_data);
-@@ -2454,6 +2454,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
+@@ -2503,6 +2503,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
int level, int type)
{
@@ -98840,7 +99211,7 @@ index c0fc6bd..51d8326 100644
struct sock_exterr_skb *serr;
struct sk_buff *skb, *skb2;
int copied, err;
-@@ -2475,7 +2476,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
+@@ -2524,7 +2525,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
sock_recv_timestamp(msg, sk, skb);
serr = SKB_EXT_ERR(skb);
@@ -98851,7 +99222,7 @@ index c0fc6bd..51d8326 100644
msg->msg_flags |= MSG_ERRQUEUE;
err = copied;
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
-index 6a7fae2..d7c22e6 100644
+index c38e7a2..773e3d7 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -9,26 +9,33 @@
@@ -98994,7 +99365,7 @@ index 4c04848..f575934 100644
static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
-index a603823..a36ee0b 100644
+index 3b726f3..1af6368 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
@@ -99028,21 +99399,6 @@ index 5325b54..a0d4d69 100644
return -EFAULT;
*lenp = len;
-diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
-index 1846c1f..96d4a9f 100644
---- a/net/ieee802154/dgram.c
-+++ b/net/ieee802154/dgram.c
-@@ -313,8 +313,9 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
- if (saddr) {
- saddr->family = AF_IEEE802154;
- saddr->addr = mac_cb(skb)->sa;
-+ }
-+ if (addr_len)
- *addr_len = sizeof(*saddr);
-- }
-
- if (flags & MSG_TRUNC)
- copied = skb->len;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 19ab78a..bf575c9 100644
--- a/net/ipv4/af_inet.c
@@ -99346,6 +99702,42 @@ index 580dd96..9fcef7e 100644
msg.msg_controllen = len;
msg.msg_flags = flags;
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 0c3a5d1..c05c07d 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
+ {
+ struct dst_entry *old_dst;
+
+- if (dst) {
+- if (dst->flags & DST_NOCACHE)
+- dst = NULL;
+- else
+- dst_clone(dst);
+- }
++ dst_clone(dst);
+ old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
+ dst_release(old_dst);
+ }
+@@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
+
+ rcu_read_lock();
+ dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
++ if (dst && !atomic_inc_not_zero(&dst->__refcnt))
++ dst = NULL;
+ if (dst) {
+ if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+- rcu_read_unlock();
+ tunnel_dst_reset(t);
+- return NULL;
++ dst_release(dst);
++ dst = NULL;
+ }
+- dst_hold(dst);
+ }
+ rcu_read_unlock();
+ return (struct rtable *)dst;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index e4a8f76..dd8ad72 100644
--- a/net/ipv4/ip_vti.c
@@ -99400,7 +99792,7 @@ index b3e86ea..18ce98c 100644
return res;
}
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
-index 812b183..56cbe9c 100644
+index 62eaa00..29b2dc2 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
@@ -99533,7 +99925,7 @@ index 2510c02..cfb34fa 100644
pr_err("Unable to proc dir entry\n");
return -ENOMEM;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
-index e21934b..3ae545c 100644
+index e21934b..4e7cb58 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -59,7 +59,7 @@ struct ping_table {
@@ -99572,28 +99964,7 @@ index e21934b..3ae545c 100644
info, (u8 *)icmph);
#endif
}
-@@ -844,6 +844,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- {
- struct inet_sock *isk = inet_sk(sk);
- int family = sk->sk_family;
-+ struct sockaddr_in *sin;
-+ struct sockaddr_in6 *sin6;
- struct sk_buff *skb;
- int copied, err;
-
-@@ -853,12 +855,19 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- if (flags & MSG_OOB)
- goto out;
-
-+ if (addr_len) {
-+ if (family == AF_INET)
-+ *addr_len = sizeof(*sin);
-+ else if (family == AF_INET6 && addr_len)
-+ *addr_len = sizeof(*sin6);
-+ }
-+
- if (flags & MSG_ERRQUEUE) {
- if (family == AF_INET) {
+@@ -858,7 +858,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
@@ -99602,19 +99973,7 @@ index e21934b..3ae545c 100644
addr_len);
#endif
}
-@@ -890,7 +899,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- sin->sin_port = 0 /* skb->h.uh->source */;
- sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
- memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
-- *addr_len = sizeof(*sin);
- }
-
- if (isk->cmsg_flags)
-@@ -912,14 +920,13 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- sin6->sin6_scope_id =
- ipv6_iface_scope_id(&sin6->sin6_addr,
- IP6CB(skb)->iif);
-- *addr_len = sizeof(*sin6);
+@@ -916,10 +916,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}
if (inet6_sk(sk)->rxopt.all)
@@ -99627,7 +99986,7 @@ index e21934b..3ae545c 100644
else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
ip_cmsg_recv(msg, skb);
#endif
-@@ -1111,7 +1118,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
+@@ -1111,7 +1111,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
@@ -99637,7 +99996,7 @@ index e21934b..3ae545c 100644
static int ping_v4_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
-index c04518f..824ebe5 100644
+index c04518f..c402063 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -99649,25 +100008,7 @@ index c04518f..824ebe5 100644
kfree_skb(skb);
return NET_RX_DROP;
}
-@@ -696,6 +696,9 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- if (flags & MSG_OOB)
- goto out;
-
-+ if (addr_len)
-+ *addr_len = sizeof(*sin);
-+
- if (flags & MSG_ERRQUEUE) {
- err = ip_recv_error(sk, msg, len, addr_len);
- goto out;
-@@ -723,7 +726,6 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
- sin->sin_port = 0;
- memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
-- *addr_len = sizeof(*sin);
- }
- if (inet->cmsg_flags)
- ip_cmsg_recv(msg, skb);
-@@ -748,16 +750,20 @@ static int raw_init(struct sock *sk)
+@@ -748,16 +748,20 @@ static int raw_init(struct sock *sk)
static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
{
@@ -99689,7 +100030,7 @@ index c04518f..824ebe5 100644
if (get_user(len, optlen))
goto out;
-@@ -767,8 +773,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
+@@ -767,8 +771,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
if (len > sizeof(struct icmp_filter))
len = sizeof(struct icmp_filter);
ret = -EFAULT;
@@ -99700,7 +100041,7 @@ index c04518f..824ebe5 100644
goto out;
ret = 0;
out: return ret;
-@@ -997,7 +1003,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
+@@ -997,7 +1001,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
0, 0L, 0,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
0, sock_i_ino(sp),
@@ -99916,7 +100257,7 @@ index 44eba05..b36864b 100644
hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
if (hdr == NULL)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index eeaac39..dc29942 100644
+index e364746..598e76e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -761,7 +761,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
@@ -99928,7 +100269,7 @@ index eeaac39..dc29942 100644
sk->sk_max_pacing_rate);
}
-@@ -4485,7 +4485,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
+@@ -4484,7 +4484,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
* simplifies code)
*/
static void
@@ -99937,7 +100278,7 @@ index eeaac39..dc29942 100644
struct sk_buff *head, struct sk_buff *tail,
u32 start, u32 end)
{
-@@ -5562,6 +5562,7 @@ discard:
+@@ -5561,6 +5561,7 @@ discard:
tcp_paws_reject(&tp->rx_opt, 0))
goto discard_and_undo;
@@ -99945,7 +100286,7 @@ index eeaac39..dc29942 100644
if (th->syn) {
/* We see SYN without ACK. It is attempt of
* simultaneous connect with crossed SYNs.
-@@ -5612,6 +5613,7 @@ discard:
+@@ -5611,6 +5612,7 @@ discard:
goto discard;
#endif
}
@@ -99953,7 +100294,7 @@ index eeaac39..dc29942 100644
/* "fifth, if neither of the SYN or RST bits is set then
* drop the segment and return."
*/
-@@ -5658,7 +5660,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+@@ -5657,7 +5659,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
goto discard;
if (th->syn) {
@@ -100091,7 +100432,7 @@ index 64f0354..a81b39d 100644
syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
/* Has it gone just too far? */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
-index 77bd16f..5f7174a 100644
+index b25e852..cdc3258 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -87,6 +87,7 @@
@@ -100151,20 +100492,7 @@ index 77bd16f..5f7174a 100644
__skb_unlink(skb, rcvq);
__skb_queue_tail(&list_kill, skb);
}
-@@ -1234,6 +1251,12 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- int is_udplite = IS_UDPLITE(sk);
- bool slow;
-
-+ /*
-+ * Check any passed addresses
-+ */
-+ if (addr_len)
-+ *addr_len = sizeof(*sin);
-+
- if (flags & MSG_ERRQUEUE)
- return ip_recv_error(sk, msg, len, addr_len);
-
-@@ -1243,6 +1266,10 @@ try_again:
+@@ -1243,6 +1260,10 @@ try_again:
if (!skb)
goto out;
@@ -100175,7 +100503,7 @@ index 77bd16f..5f7174a 100644
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
-@@ -1276,7 +1303,7 @@ try_again:
+@@ -1276,7 +1297,7 @@ try_again:
if (unlikely(err)) {
trace_kfree_skb(skb, udp_recvmsg);
if (!peeked) {
@@ -100184,15 +100512,7 @@ index 77bd16f..5f7174a 100644
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
-@@ -1295,7 +1322,6 @@ try_again:
- sin->sin_port = udp_hdr(skb)->source;
- sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
- memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
-- *addr_len = sizeof(*sin);
- }
- if (inet->cmsg_flags)
- ip_cmsg_recv(msg, skb);
-@@ -1566,7 +1592,7 @@ csum_error:
+@@ -1566,7 +1587,7 @@ csum_error:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -100201,7 +100521,7 @@ index 77bd16f..5f7174a 100644
kfree_skb(skb);
return -1;
}
-@@ -1585,7 +1611,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
+@@ -1585,7 +1606,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
@@ -100210,7 +100530,7 @@ index 77bd16f..5f7174a 100644
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
-@@ -1786,6 +1812,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+@@ -1786,6 +1807,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
goto csum_error;
UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
@@ -100220,7 +100540,7 @@ index 77bd16f..5f7174a 100644
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
/*
-@@ -2350,7 +2379,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
+@@ -2354,7 +2378,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
@@ -100418,10 +100738,10 @@ index 2465d18..bc5bf7f 100644
.maxtype = IFLA_GRE_MAX,
.policy = ip6gre_policy,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
-index 0e51f68..1f501e1 100644
+index 9120339..cfdd84f 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
-@@ -85,7 +85,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
+@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
@@ -100430,7 +100750,7 @@ index 0e51f68..1f501e1 100644
static int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
-@@ -1714,7 +1714,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
+@@ -1715,7 +1715,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
};
@@ -100566,36 +100886,27 @@ index 767ab8d..c5ec70a 100644
return -ENOMEM;
}
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
-index 827f795..bdff9eb 100644
+index b31a012..c36f09c 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
-@@ -9,8 +9,8 @@
+@@ -9,7 +9,7 @@
void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
{
- static atomic_t ipv6_fragmentation_id;
-- int old, new;
+ static atomic_unchecked_t ipv6_fragmentation_id;
-+ int id;
+ int ident;
#if IS_ENABLED(CONFIG_IPV6)
- if (rt && !(rt->dst.flags & DST_NOPEER)) {
-@@ -26,13 +26,8 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+@@ -26,7 +26,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
}
}
#endif
-- do {
-- old = atomic_read(&ipv6_fragmentation_id);
-- new = old + 1;
-- if (!new)
-- new = 1;
-- } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
-- fhdr->identification = htonl(new);
-+ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
-+ fhdr->identification = htonl(id);
+- ident = atomic_inc_return(&ipv6_fragmentation_id);
++ ident = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
+ fhdr->identification = htonl(ident);
}
EXPORT_SYMBOL(ipv6_select_ident);
-
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index bda7429..469b26b 100644
--- a/net/ipv6/ping.c
@@ -100670,7 +100981,7 @@ index 091d066..139d410 100644
goto proc_dev_snmp6_fail;
return 0;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
-index 1f29996..46fe0c7 100644
+index 1f29996..7418779 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -100700,25 +101011,7 @@ index 1f29996..46fe0c7 100644
kfree_skb(skb);
return NET_RX_DROP;
}
-@@ -469,6 +469,9 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
- if (flags & MSG_OOB)
- return -EOPNOTSUPP;
-
-+ if (addr_len)
-+ *addr_len=sizeof(*sin6);
-+
- if (flags & MSG_ERRQUEUE)
- return ipv6_recv_error(sk, msg, len, addr_len);
-
-@@ -507,7 +510,6 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
- sin6->sin6_flowinfo = 0;
- sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
- IP6CB(skb)->iif);
-- *addr_len = sizeof(*sin6);
- }
-
- sock_recv_ts_and_drops(msg, sk, skb);
-@@ -610,7 +612,7 @@ out:
+@@ -610,7 +610,7 @@ out:
return err;
}
@@ -100727,7 +101020,7 @@ index 1f29996..46fe0c7 100644
struct flowi6 *fl6, struct dst_entry **dstp,
unsigned int flags)
{
-@@ -922,12 +924,15 @@ do_confirm:
+@@ -922,12 +922,15 @@ do_confirm:
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
char __user *optval, int optlen)
{
@@ -100744,7 +101037,7 @@ index 1f29996..46fe0c7 100644
return 0;
default:
return -ENOPROTOOPT;
-@@ -940,6 +945,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
+@@ -940,6 +943,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
int len;
@@ -100752,7 +101045,7 @@ index 1f29996..46fe0c7 100644
switch (optname) {
case ICMPV6_FILTER:
-@@ -951,7 +957,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
+@@ -951,7 +955,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
len = sizeof(struct icmp6_filter);
if (put_user(len, optlen))
return -EFAULT;
@@ -100818,7 +101111,7 @@ index 7cc1102..7785931 100644
table = kmemdup(ipv6_route_table_template,
sizeof(ipv6_route_table_template),
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
-index b4d74c8..b4f3fbe 100644
+index fe548ba..0dfa744 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
@@ -100912,7 +101205,7 @@ index 889079b..a04512c 100644
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
-index 1e586d9..384a9c9 100644
+index 20b63d2..31a777d 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
@@ -100926,17 +101219,7 @@ index 1e586d9..384a9c9 100644
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
{
const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
-@@ -392,6 +396,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
- int is_udp4;
- bool slow;
-
-+ if (addr_len)
-+ *addr_len = sizeof(struct sockaddr_in6);
-+
- if (flags & MSG_ERRQUEUE)
- return ipv6_recv_error(sk, msg, len, addr_len);
-
-@@ -435,7 +442,7 @@ try_again:
+@@ -435,7 +439,7 @@ try_again:
if (unlikely(err)) {
trace_kfree_skb(skb, udpv6_recvmsg);
if (!peeked) {
@@ -100945,16 +101228,7 @@ index 1e586d9..384a9c9 100644
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
-@@ -475,7 +482,7 @@ try_again:
- ipv6_iface_scope_id(&sin6->sin6_addr,
- IP6CB(skb)->iif);
- }
-- *addr_len = sizeof(*sin6);
-+
- }
-
- if (np->rxopt.all)
-@@ -690,7 +697,7 @@ csum_error:
+@@ -690,7 +694,7 @@ csum_error:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -100963,7 +101237,7 @@ index 1e586d9..384a9c9 100644
kfree_skb(skb);
return -1;
}
-@@ -747,7 +754,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
+@@ -747,7 +751,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
if (likely(skb1 == NULL))
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
@@ -100972,7 +101246,7 @@ index 1e586d9..384a9c9 100644
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
-@@ -886,6 +893,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+@@ -886,6 +890,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
goto csum_error;
UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
@@ -101137,7 +101411,7 @@ index b9ac598..f88cc56 100644
return;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
-index c4b7218..c7e9f14 100644
+index 1465363..c7e9f14 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
@@ -101153,15 +101427,6 @@ index c4b7218..c7e9f14 100644
}
write_unlock_bh(&iucv_sk_list.lock);
-@@ -1829,7 +1829,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
- spin_lock_irqsave(&list->lock, flags);
-
- while (list_skb != (struct sk_buff *)list) {
-- if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
-+ if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
- this = list_skb;
- break;
- }
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index cd5b8ec..f205e6b 100644
--- a/net/iucv/iucv.c
@@ -101192,28 +101457,6 @@ index 7932697..a13d158 100644
} while (!res);
return res;
}
-diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
-index 0b44d85..1a7f88b 100644
---- a/net/l2tp/l2tp_ip.c
-+++ b/net/l2tp/l2tp_ip.c
-@@ -518,6 +518,9 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
- if (flags & MSG_OOB)
- goto out;
-
-+ if (addr_len)
-+ *addr_len = sizeof(*sin);
-+
- skb = skb_recv_datagram(sk, flags, noblock, &err);
- if (!skb)
- goto out;
-@@ -540,7 +543,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
- sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
- sin->sin_port = 0;
- memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
-- *addr_len = sizeof(*sin);
- }
- if (inet->cmsg_flags)
- ip_cmsg_recv(msg, skb);
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 1a3c7e0..80f8b0c 100644
--- a/net/llc/llc_proc.c
@@ -101282,7 +101525,7 @@ index b127902..9dc4947 100644
/* number of interfaces with corresponding FIF_ flags */
int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
-index ce1c443..6cd39e1 100644
+index 8f7fabc..e400523 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -529,7 +529,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
@@ -101527,7 +101770,7 @@ index a8eb0a8..86f2de4 100644
if (!todrop_rate[i]) return 0;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
-index 4f26ee4..6a9d7c3 100644
+index 3d2d2c8..c87e4d3 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
@@ -101539,7 +101782,7 @@ index 4f26ee4..6a9d7c3 100644
ip_vs_conn_put(cp);
return ret;
}
-@@ -1706,7 +1706,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
+@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
pkts = sysctl_sync_threshold(ipvs);
else
@@ -101717,7 +101960,7 @@ index a4b5e2a..13b1de3 100644
table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
GFP_KERNEL);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
-index 356bef5..99932cb 100644
+index 356bef5..163b56a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1627,6 +1627,10 @@ void nf_conntrack_init_end(void)
@@ -101736,7 +101979,7 @@ index 356bef5..99932cb 100644
}
+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
++ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
+#else
net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+#endif
@@ -102045,7 +102288,7 @@ index 11de55e..f25e448 100644
return 0;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index 04748ab6..c72ef1f 100644
+index 7f40fd2..c72ef1f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -257,7 +257,7 @@ static void netlink_overrun(struct sock *sk)
@@ -102057,137 +102300,7 @@ index 04748ab6..c72ef1f 100644
}
static void netlink_rcv_wake(struct sock *sk)
-@@ -1360,7 +1360,74 @@ retry:
- return err;
- }
-
--static inline int netlink_capable(const struct socket *sock, unsigned int flag)
-+/**
-+ * __netlink_ns_capable - General netlink message capability test
-+ * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
-+ * @user_ns: The user namespace of the capability to use
-+ * @cap: The capability to use
-+ *
-+ * Test to see if the opener of the socket we received the message
-+ * from had when the netlink socket was created and the sender of the
-+ * message has has the capability @cap in the user namespace @user_ns.
-+ */
-+bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
-+ struct user_namespace *user_ns, int cap)
-+{
-+ return ((nsp->flags & NETLINK_SKB_DST) ||
-+ file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
-+ ns_capable(user_ns, cap);
-+}
-+EXPORT_SYMBOL(__netlink_ns_capable);
-+
-+/**
-+ * netlink_ns_capable - General netlink message capability test
-+ * @skb: socket buffer holding a netlink command from userspace
-+ * @user_ns: The user namespace of the capability to use
-+ * @cap: The capability to use
-+ *
-+ * Test to see if the opener of the socket we received the message
-+ * from had when the netlink socket was created and the sender of the
-+ * message has has the capability @cap in the user namespace @user_ns.
-+ */
-+bool netlink_ns_capable(const struct sk_buff *skb,
-+ struct user_namespace *user_ns, int cap)
-+{
-+ return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
-+}
-+EXPORT_SYMBOL(netlink_ns_capable);
-+
-+/**
-+ * netlink_capable - Netlink global message capability test
-+ * @skb: socket buffer holding a netlink command from userspace
-+ * @cap: The capability to use
-+ *
-+ * Test to see if the opener of the socket we received the message
-+ * from had when the netlink socket was created and the sender of the
-+ * message has has the capability @cap in all user namespaces.
-+ */
-+bool netlink_capable(const struct sk_buff *skb, int cap)
-+{
-+ return netlink_ns_capable(skb, &init_user_ns, cap);
-+}
-+EXPORT_SYMBOL(netlink_capable);
-+
-+/**
-+ * netlink_net_capable - Netlink network namespace message capability test
-+ * @skb: socket buffer holding a netlink command from userspace
-+ * @cap: The capability to use
-+ *
-+ * Test to see if the opener of the socket we received the message
-+ * from had when the netlink socket was created and the sender of the
-+ * message has has the capability @cap over the network namespace of
-+ * the socket we received the message from.
-+ */
-+bool netlink_net_capable(const struct sk_buff *skb, int cap)
-+{
-+ return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
-+}
-+EXPORT_SYMBOL(netlink_net_capable);
-+
-+static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
- {
- return (nl_table[sock->sk->sk_protocol].flags & flag) ||
- ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
-@@ -1428,7 +1495,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
-
- /* Only superuser is allowed to listen multicasts */
- if (nladdr->nl_groups) {
-- if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
-+ if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
- return -EPERM;
- err = netlink_realloc_groups(sk);
- if (err)
-@@ -1490,7 +1557,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
- return -EINVAL;
-
- if ((nladdr->nl_groups || nladdr->nl_pid) &&
-- !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
-+ !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
- return -EPERM;
-
- if (!nlk->portid)
-@@ -2096,7 +2163,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
- break;
- case NETLINK_ADD_MEMBERSHIP:
- case NETLINK_DROP_MEMBERSHIP: {
-- if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
-+ if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
- return -EPERM;
- err = netlink_realloc_groups(sk);
- if (err)
-@@ -2228,6 +2295,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
- struct sk_buff *skb;
- int err;
- struct scm_cookie scm;
-+ u32 netlink_skb_flags = 0;
-
- if (msg->msg_flags&MSG_OOB)
- return -EOPNOTSUPP;
-@@ -2247,8 +2315,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
- dst_group = ffs(addr->nl_groups);
- err = -EPERM;
- if ((dst_group || dst_portid) &&
-- !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
-+ !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
- goto out;
-+ netlink_skb_flags |= NETLINK_SKB_DST;
- } else {
- dst_portid = nlk->dst_portid;
- dst_group = nlk->dst_group;
-@@ -2278,6 +2347,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
- NETLINK_CB(skb).portid = nlk->portid;
- NETLINK_CB(skb).dst_group = dst_group;
- NETLINK_CB(skb).creds = siocb->scm->creds;
-+ NETLINK_CB(skb).flags = netlink_skb_flags;
-
- err = -EFAULT;
- if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
-@@ -2933,7 +3003,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
+@@ -3003,7 +3003,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(s),
nlk->cb_running,
atomic_read(&s->sk_refcnt),
@@ -104436,7 +104549,7 @@ index 0865b3e..7235dd4 100644
__ksymtab_gpl : { *(SORT(___ksymtab_gpl+*)) }
__ksymtab_unused : { *(SORT(___ksymtab_unused+*)) }
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
-index f46e4dd..090e168 100644
+index 152d4d2..791684c 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -291,6 +291,7 @@ fi
@@ -105585,7 +105698,7 @@ index f79fa8b..6161868 100644
};
extern struct ima_h_table ima_htable;
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
-index c38bbce..f45133d 100644
+index 025824a..2a681b1 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -137,7 +137,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
@@ -114586,7 +114699,7 @@ index 0000000..4378111
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..8972f81
+index 0000000..4077712
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
@@ -0,0 +1,5988 @@
@@ -116400,8 +116513,8 @@ index 0000000..8972f81
+attach_hdlc_protocol_19986 attach_hdlc_protocol 3 19986 NULL
+rtw_set_wps_probe_resp_19989 rtw_set_wps_probe_resp 3 19989 NULL
+diva_um_idi_read_20003 diva_um_idi_read 0 20003 NULL
-+lov_stripe_md_size_20009 lov_stripe_md_size 0-1 20009 NULL nohasharray
-+event_trigger_write_20009 event_trigger_write 3 20009 &lov_stripe_md_size_20009
++event_trigger_write_20009 event_trigger_write 3 20009 NULL nohasharray
++lov_stripe_md_size_20009 lov_stripe_md_size 0-1 20009 &event_trigger_write_20009
+tree_mod_log_eb_move_20011 tree_mod_log_eb_move 5 20011 NULL
+SYSC_fgetxattr_20027 SYSC_fgetxattr 4 20027 NULL
+split_scan_timeout_read_20029 split_scan_timeout_read 3 20029 NULL
@@ -116768,8 +116881,8 @@ index 0000000..8972f81
+bin_to_hex_dup_23853 bin_to_hex_dup 2 23853 NULL
+ocfs2_xattr_get_clusters_23857 ocfs2_xattr_get_clusters 0 23857 NULL
+ieee80211_if_read_dot11MeshMaxPeerLinks_23878 ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 NULL
-+nouveau_clock_create__23881 nouveau_clock_create_ 5 23881 NULL nohasharray
-+writeback_single_inode_23881 writeback_single_inode 0 23881 &nouveau_clock_create__23881
++writeback_single_inode_23881 writeback_single_inode 0 23881 NULL nohasharray
++nouveau_clock_create__23881 nouveau_clock_create_ 5 23881 &writeback_single_inode_23881
+tipc_snprintf_23893 tipc_snprintf 2-0 23893 NULL
+add_new_gdb_meta_bg_23911 add_new_gdb_meta_bg 3 23911 NULL nohasharray
+ieee80211_if_read_hw_queues_23911 ieee80211_if_read_hw_queues 3 23911 &add_new_gdb_meta_bg_23911