author     Natanael Copa <ncopa@alpinelinux.org>    2012-05-14 13:37:42 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>    2012-05-14 13:38:45 +0000
commit     7cbfc3e586006f3ed1a141fbcab7dfe6755be32d (patch)
tree       cfc165ccf333c152874cae8f98a1cb9c96e20bf3
parent     168b1d4d53fb306f0ac48f54973601bf7b74e4c9 (diff)
main/linux-grsec: upgrade to 3.3.6 kernel
-rw-r--r--  main/linux-grsec/APKBUILD                                  |   8
-rw-r--r--  main/linux-grsec/grsecurity-2.9-3.3.6-201205131658.patch  | 773
            (renamed from main/linux-grsec/grsecurity-2.9-3.3.5-201205071839.patch)
2 files changed, 621 insertions(+), 160 deletions(-)
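The APKBUILD side of this upgrade is mechanical: bump pkgver to 3.3.6, swap the old grsecurity patch filename for the new one in source=, and regenerate the md5sums= block. A minimal sketch of the usual aports workflow, assuming a checked-out aports tree with abuild installed (the commands below are illustrative of standard abuild usage, not taken from this commit):

    cd aports/main/linux-grsec
    # edit APKBUILD: set pkgver=3.3.6 and update the grsecurity
    # patch filename in source=, then:
    abuild checksum    # fetches the sources and rewrites the md5sums= block
    abuild -r          # test-build, confirming the renamed patch still applies

The diff below reflects exactly that: two hunks in the APKBUILD (version bump and checksum update) plus the regenerated grsecurity patch itself.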
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD index 1a4e1e713f..3a85b1ba79 100644 --- a/main/linux-grsec/APKBUILD +++ b/main/linux-grsec/APKBUILD @@ -2,7 +2,7 @@ _flavor=grsec pkgname=linux-${_flavor} -pkgver=3.3.5 +pkgver=3.3.6 _kernver=3.3 pkgrel=0 pkgdesc="Linux kernel with grsecurity" @@ -14,7 +14,7 @@ _config=${config:-kernelconfig.${CARCH}} install= source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz - grsecurity-2.9-3.3.5-201205071839.patch + grsecurity-2.9-3.3.6-201205131658.patch 0004-arp-flush-arp-cache-on-device-change.patch @@ -138,8 +138,8 @@ dev() { } md5sums="7133f5a2086a7d7ef97abac610c094f5 linux-3.3.tar.xz -d346edca5d3de7052f49996b01cef401 patch-3.3.5.xz -1c2f2313347889b313f8af1212c708bf grsecurity-2.9-3.3.5-201205071839.patch +a7f67e9c491403906e4bb475de194631 patch-3.3.6.xz +47553b5150ed81a8ee1a4d9fec2688e0 grsecurity-2.9-3.3.6-201205131658.patch 776adeeb5272093574f8836c5037dd7d 0004-arp-flush-arp-cache-on-device-change.patch 5d2818cb5329aec600ee8ffc3896a728 kernelconfig.x86 39552b468a33a04678113c12ec6c1a91 kernelconfig.x86_64" diff --git a/main/linux-grsec/grsecurity-2.9-3.3.5-201205071839.patch b/main/linux-grsec/grsecurity-2.9-3.3.6-201205131658.patch index 222eccde75..0bad506a6b 100644 --- a/main/linux-grsec/grsecurity-2.9-3.3.5-201205071839.patch +++ b/main/linux-grsec/grsecurity-2.9-3.3.6-201205131658.patch @@ -195,7 +195,7 @@ index d99fd9c..8689fef 100644 pcd. [PARIDE] diff --git a/Makefile b/Makefile -index 64615e9..64d72ce 100644 +index 9cd6941..92e68ff 100644 --- a/Makefile +++ b/Makefile @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ @@ -1457,6 +1457,36 @@ index e4c96cc..1145653 100644 #endif /* __ASSEMBLY__ */ #define arch_align_stack(x) (x) +diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h +index d4c24d4..4ac53e8 100644 +--- a/arch/arm/include/asm/thread_info.h ++++ b/arch/arm/include/asm/thread_info.h +@@ -141,6 +141,12 @@ extern void vfp_flush_hwstate(struct thread_info *); + #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ + #define TIF_SYSCALL_TRACE 8 + #define TIF_SYSCALL_AUDIT 9 ++ ++/* within 8 bits of TIF_SYSCALL_TRACE ++ to meet flexible second operand requirements ++*/ ++#define TIF_GRSEC_SETXID 10 ++ + #define TIF_POLLING_NRFLAG 16 + #define TIF_USING_IWMMXT 17 + #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ +@@ -156,9 +162,11 @@ extern void vfp_flush_hwstate(struct thread_info *); + #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) + #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) + #define _TIF_SECCOMP (1 << TIF_SECCOMP) ++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID) + + /* Checks for any syscall work in entry-common.S */ +-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT) ++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ ++ _TIF_GRSEC_SETXID) + + /* + * Change these and you break ASM code in entry-common.S diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 2958976..12ccac4 100644 --- a/arch/arm/include/asm/uaccess.h @@ -1568,6 +1598,30 @@ index 971d65c..cc936fb 100644 #ifdef CONFIG_MMU /* * The vectors page is always readable from user space for the +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c +index f5ce8ab..4b73893 100644 +--- a/arch/arm/kernel/ptrace.c ++++ b/arch/arm/kernel/ptrace.c +@@ -905,10 +905,19 @@ long arch_ptrace(struct 
task_struct *child, long request, + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) + { + unsigned long ip; + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + if (why) + audit_syscall_exit(regs); + else diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index a255c39..4a19b25 100644 --- a/arch/arm/kernel/setup.c @@ -2791,6 +2845,40 @@ index 6018c80..7c37203 100644 +#define arch_align_stack(x) ((x) & ~0xfUL) #endif /* _ASM_SYSTEM_H */ +diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h +index 0d85d8e..ec71487 100644 +--- a/arch/mips/include/asm/thread_info.h ++++ b/arch/mips/include/asm/thread_info.h +@@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28"); + #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */ + #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */ + #define TIF_LOAD_WATCH 25 /* If set, load watch registers */ ++/* li takes a 32bit immediate */ ++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */ + #define TIF_SYSCALL_TRACE 31 /* syscall trace active */ + + #ifdef CONFIG_MIPS32_O32 +@@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28"); + #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR) + #define _TIF_FPUBOUND (1<<TIF_FPUBOUND) + #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID) ++ ++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID) + + /* work to do in syscall_trace_leave() */ +-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT) ++#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID) + + /* work to do on interrupt/exception return */ + #define _TIF_WORK_MASK (0x0000ffef & \ + ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT)) + /* work to do on any return to u-space */ +-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP) ++#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID) + + #endif /* __KERNEL__ */ + diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c index 9fdd8bc..4bd7f1a 100644 --- a/arch/mips/kernel/binfmt_elfn32.c @@ -2847,6 +2935,85 @@ index 7955409..ceaea7c 100644 - - return sp & ALMASK; -} +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c +index 7786b60..3e38c72 100644 +--- a/arch/mips/kernel/ptrace.c ++++ b/arch/mips/kernel/ptrace.c +@@ -529,6 +529,10 @@ static inline int audit_arch(void) + return arch; + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + /* + * Notification of system call entry/exit + * - triggered by current->work.syscall_trace +@@ -538,6 +542,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs) + /* do the secure computing check first */ + secure_computing(regs->regs[2]); + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + if (!(current->ptrace & PT_PTRACED)) + goto out; + +diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S +index a632bc1..0b77c7c 100644 +--- a/arch/mips/kernel/scall32-o32.S ++++ b/arch/mips/kernel/scall32-o32.S +@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp) + + stack_done: + lw t0, 
TI_FLAGS($28) # syscall tracing enabled? +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT ++ li t1, _TIF_SYSCALL_WORK + and t0, t1 + bnez t0, syscall_trace_entry # -> yes + +diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S +index 3b5a5e9..e1ee86d 100644 +--- a/arch/mips/kernel/scall64-64.S ++++ b/arch/mips/kernel/scall64-64.S +@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp) + + sd a3, PT_R26(sp) # save a3 for syscall restarting + +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT ++ li t1, _TIF_SYSCALL_WORK + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? + and t0, t1, t0 + bnez t0, syscall_trace_entry +diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S +index 6be6f70..1859577 100644 +--- a/arch/mips/kernel/scall64-n32.S ++++ b/arch/mips/kernel/scall64-n32.S +@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) + + sd a3, PT_R26(sp) # save a3 for syscall restarting + +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT ++ li t1, _TIF_SYSCALL_WORK + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? + and t0, t1, t0 + bnez t0, n32_syscall_trace_entry +diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S +index 5422855..74e63a3 100644 +--- a/arch/mips/kernel/scall64-o32.S ++++ b/arch/mips/kernel/scall64-o32.S +@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp) + PTR 4b, bad_stack + .previous + +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT ++ li t1, _TIF_SYSCALL_WORK + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? + and t0, t1, t0 + bnez t0, trace_a_syscall diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 69ebd58..e4bff83 100644 --- a/arch/mips/mm/fault.c @@ -3689,6 +3856,40 @@ index c377457..3c69fbc 100644 /* Used in very early kernel initialization. */ extern unsigned long reloc_offset(void); +diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h +index 96471494..60ed5a2 100644 +--- a/arch/powerpc/include/asm/thread_info.h ++++ b/arch/powerpc/include/asm/thread_info.h +@@ -104,13 +104,15 @@ static inline struct thread_info *current_thread_info(void) + #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ + #define TIF_SINGLESTEP 8 /* singlestepping active */ +-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */ + #define TIF_SECCOMP 10 /* secure computing */ + #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ + #define TIF_NOERROR 12 /* Force successful syscall return */ + #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ + #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ + #define TIF_RUNLATCH 16 /* Is the runlatch enabled? 
*/ ++#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ ++/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */ ++#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */ + + /* as above, but as bit values */ + #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) +@@ -128,8 +130,11 @@ static inline struct thread_info *current_thread_info(void) + #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) + #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) + #define _TIF_RUNLATCH (1<<TIF_RUNLATCH) ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID) ++ + #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ +- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) ++ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT \ ++ _TIF_GRSEC_SETXID) + + #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ + _TIF_NOTIFY_RESUME) diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index bd0fb84..a42a14b 100644 --- a/arch/powerpc/include/asm/uaccess.h @@ -4065,6 +4266,45 @@ index d817ab0..b23b18e 100644 - - return ret; -} +diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c +index 5b43325..94a5bb4 100644 +--- a/arch/powerpc/kernel/ptrace.c ++++ b/arch/powerpc/kernel/ptrace.c +@@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request, + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + /* + * We must return the syscall number to actually look up in the table. + * This can be -1L to skip running any syscall at all. +@@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs) + + secure_computing(regs->gpr[0]); + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + if (test_thread_flag(TIF_SYSCALL_TRACE) && + tracehook_report_syscall_entry(regs)) + /* +@@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs) + { + int step; + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + audit_syscall_exit(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 836a5a1..27289a3 100644 --- a/arch/powerpc/kernel/signal_32.c @@ -5253,7 +5493,7 @@ index c2a1080..21ed218 100644 /* diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h -index 01d057f..0a02f7e 100644 +index 01d057f..13a7d2f 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -63,6 +63,8 @@ struct thread_info { @@ -5265,6 +5505,38 @@ index 01d057f..0a02f7e 100644 unsigned long fpregs[0] __attribute__ ((aligned(64))); }; +@@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6"); + #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */ + /* flag bit 6 is available */ + #define TIF_32BIT 7 /* 32-bit binary */ +-/* flag bit 8 is available */ ++#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */ + #define TIF_SECCOMP 9 /* secure computing */ + #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */ + #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ ++ + /* NOTE: Thread flags >= 12 should be ones we have no interest + * in using in assembly, else we can't use the mask as + * an immediate value in instructions 
such as andcc. +@@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6"); + #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) + #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) + #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID) + + #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \ + _TIF_DO_NOTIFY_RESUME_MASK | \ + _TIF_NEED_RESCHED) + #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING) + ++#define _TIF_WORK_SYSCALL \ ++ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \ ++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID) ++ ++ + /* + * Thread-synchronous status. + * diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h index e88fbe5..96b0ce5 100644 --- a/arch/sparc/include/asm/uaccess.h @@ -5475,6 +5747,45 @@ index 39d8b05..d1a7d90 100644 (void *) gp->tpc, (void *) gp->o7, (void *) gp->i7, +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c +index 9388844..0075fd2 100644 +--- a/arch/sparc/kernel/ptrace_64.c ++++ b/arch/sparc/kernel/ptrace_64.c +@@ -1058,6 +1058,10 @@ long arch_ptrace(struct task_struct *child, long request, + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + asmlinkage int syscall_trace_enter(struct pt_regs *regs) + { + int ret = 0; +@@ -1065,6 +1069,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) + /* do the secure computing check first */ + secure_computing(regs->u_regs[UREG_G1]); + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ret = tracehook_report_syscall_entry(regs); + +@@ -1085,6 +1094,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) + + asmlinkage void syscall_trace_leave(struct pt_regs *regs) + { ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + audit_syscall_exit(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 42b282f..28ce9f2 100644 --- a/arch/sparc/kernel/sys_sparc_32.c @@ -5648,6 +5959,55 @@ index 232df99..cee1f9c 100644 mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } +diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S +index 1d7e274..b39c527 100644 +--- a/arch/sparc/kernel/syscalls.S ++++ b/arch/sparc/kernel/syscalls.S +@@ -62,7 +62,7 @@ sys32_rt_sigreturn: + #endif + .align 32 + 1: ldx [%g6 + TI_FLAGS], %l5 +- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 ++ andcc %l5, _TIF_WORK_SYSCALL, %g0 + be,pt %icc, rtrap + nop + call syscall_trace_leave +@@ -179,7 +179,7 @@ linux_sparc_syscall32: + + srl %i5, 0, %o5 ! IEU1 + srl %i2, 0, %o2 ! IEU0 Group +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0 + bne,pn %icc, linux_syscall_trace32 ! CTI + mov %i0, %l5 ! IEU1 + call %l7 ! CTI Group brk forced +@@ -202,7 +202,7 @@ linux_sparc_syscall: + + mov %i3, %o3 ! IEU1 + mov %i4, %o4 ! IEU0 Group +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0 + bne,pn %icc, linux_syscall_trace ! 
CTI Group + mov %i0, %l5 ! IEU0 + 2: call %l7 ! CTI Group brk forced +@@ -226,7 +226,7 @@ ret_sys_call: + + cmp %o0, -ERESTART_RESTARTBLOCK + bgeu,pn %xcc, 1f +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 ++ andcc %l0, _TIF_WORK_SYSCALL, %l6 + 80: + /* System call success, clear Carry condition code. */ + andn %g3, %g2, %g3 +@@ -241,7 +241,7 @@ ret_sys_call: + /* System call failure, set Carry condition code. + * Also, get abs(errno) to return to the process. + */ +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 ++ andcc %l0, _TIF_WORK_SYSCALL, %l6 + sub %g0, %o0, %o0 + or %g3, %g2, %g3 + stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c index 591f20c..0f1b925 100644 --- a/arch/sparc/kernel/traps_32.c @@ -7519,7 +7879,7 @@ index 7116dcb..d9ae1d7 100644 #endif diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c -index 89bbf4e..869908e 100644 +index e77f4e4..17e511f 100644 --- a/arch/x86/boot/compressed/relocs.c +++ b/arch/x86/boot/compressed/relocs.c @@ -13,8 +13,11 @@ @@ -7624,7 +7984,7 @@ index 89bbf4e..869908e 100644 rel->r_info = elf32_to_cpu(rel->r_info); } } -@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp) +@@ -396,13 +440,13 @@ static void read_relocs(FILE *fp) static void print_absolute_symbols(void) { @@ -7635,13 +7995,12 @@ index 89bbf4e..869908e 100644 for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; char *sym_strtab; - Elf32_Sym *sh_symtab; - int j; + unsigned int j; if (sec->shdr.sh_type != SHT_SYMTAB) { continue; -@@ -431,14 +475,14 @@ static void print_absolute_symbols(void) +@@ -429,14 +473,14 @@ static void print_absolute_symbols(void) static void print_absolute_relocs(void) { @@ -7658,7 +8017,7 @@ index 89bbf4e..869908e 100644 if (sec->shdr.sh_type != SHT_REL) { continue; } -@@ -499,13 +543,13 @@ static void print_absolute_relocs(void) +@@ -497,13 +541,13 @@ static void print_absolute_relocs(void) static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) { @@ -7674,7 +8033,7 @@ index 89bbf4e..869908e 100644 struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_REL) { -@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) +@@ -528,6 +572,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) !is_rel_reloc(sym_name(sym_strtab, sym))) { continue; } @@ -7697,7 +8056,7 @@ index 89bbf4e..869908e 100644 switch (r_type) { case R_386_NONE: case R_386_PC32: -@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb) +@@ -569,7 +629,7 @@ static int cmp_relocs(const void *va, const void *vb) static void emit_relocs(int as_text) { @@ -7706,7 +8065,7 @@ index 89bbf4e..869908e 100644 /* Count how many relocations I have and allocate space for them. */ reloc_count = 0; walk_relocs(count_reloc); -@@ -665,6 +725,7 @@ int main(int argc, char **argv) +@@ -663,6 +723,7 @@ int main(int argc, char **argv) fname, strerror(errno)); } read_ehdr(fp); @@ -12132,7 +12491,7 @@ index 2d2f01c..f985723 100644 /* * Force strict CPU ordering. 
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h -index cfd8144..1b1127d 100644 +index cfd8144..664ac89 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -10,6 +10,7 @@ @@ -12182,7 +12541,45 @@ index cfd8144..1b1127d 100644 #define init_stack (init_thread_union.stack) #else /* !__ASSEMBLY__ */ -@@ -169,45 +163,40 @@ struct thread_info { +@@ -95,6 +89,7 @@ struct thread_info { + #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ + #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ + #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ ++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */ + + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +@@ -116,16 +111,17 @@ struct thread_info { + #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) + #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) + #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) ++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID) + + /* work to do in syscall_trace_enter() */ + #define _TIF_WORK_SYSCALL_ENTRY \ + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ +- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT) ++ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID) + + /* work to do in syscall_trace_leave() */ + #define _TIF_WORK_SYSCALL_EXIT \ + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ +- _TIF_SYSCALL_TRACEPOINT) ++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID) + + /* work to do on interrupt/exception return */ + #define _TIF_WORK_MASK \ +@@ -135,7 +131,8 @@ struct thread_info { + + /* work to do on any return to user space */ + #define _TIF_ALLWORK_MASK \ +- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT) ++ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \ ++ _TIF_GRSEC_SETXID) + + /* Only used for 64 bit */ + #define _TIF_DO_NOTIFY_MASK \ +@@ -169,45 +166,40 @@ struct thread_info { ret; \ }) @@ -12253,7 +12650,7 @@ index cfd8144..1b1127d 100644 /* * macros/functions for gaining access to the thread information structure * preempt_count needs to be 1 initially, until the scheduler is functional. 
-@@ -215,27 +204,8 @@ static inline struct thread_info *current_thread_info(void) +@@ -215,27 +207,8 @@ static inline struct thread_info *current_thread_info(void) #ifndef __ASSEMBLY__ DECLARE_PER_CPU(unsigned long, kernel_stack); @@ -12283,7 +12680,7 @@ index cfd8144..1b1127d 100644 #endif #endif /* !X86_32 */ -@@ -269,5 +239,16 @@ extern void arch_task_cache_init(void); +@@ -269,5 +242,16 @@ extern void arch_task_cache_init(void); extern void free_thread_info(struct thread_info *ti); extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define arch_task_cache_init arch_task_cache_init @@ -14606,7 +15003,7 @@ index 9b9f18b..9fcaa04 100644 #include <asm/processor.h> #include <asm/fcntl.h> diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S -index 7b784f4..76aaad7 100644 +index 7b784f4..db6b628 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -179,13 +179,146 @@ @@ -14799,7 +15196,7 @@ index 7b784f4..76aaad7 100644 +#ifdef CONFIG_PAX_KERNEXEC + jae resume_userspace + -+ PAX_EXIT_KERNEL ++ pax_exit_kernel + jmp resume_kernel +#else jb resume_kernel # not returning to v8086 or userspace @@ -18533,7 +18930,7 @@ index cfa5c90..4facd28 100644 ip = *(u64 *)(fp+8); if (!in_sched_functions(ip)) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c -index 5026738..e1b5aa8 100644 +index 5026738..574f70a 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -792,6 +792,10 @@ static int ioperm_active(struct task_struct *target, @@ -18582,6 +18979,41 @@ index 5026738..e1b5aa8 100644 } void user_single_step_siginfo(struct task_struct *tsk, +@@ -1361,6 +1365,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, + # define IS_IA32 0 + #endif + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + /* + * We must return the syscall number to actually look up in the table. + * This can be -1L to skip running any syscall at all. +@@ -1369,6 +1377,11 @@ long syscall_trace_enter(struct pt_regs *regs) + { + long ret = 0; + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + /* + * If we stepped into a sysenter/syscall insn, it trapped in + * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. 
+@@ -1412,6 +1425,11 @@ void syscall_trace_leave(struct pt_regs *regs) + { + bool step; + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + audit_syscall_exit(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 42eb330..139955c 100644 --- a/arch/x86/kernel/pvclock.c @@ -18820,7 +19252,7 @@ index d7d5099..28555d0 100644 bss_resource.start = virt_to_phys(&__bss_start); bss_resource.end = virt_to_phys(&__bss_stop)-1; diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c -index 71f4727..217419b 100644 +index 5a98aa2..848d2be 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -21,19 +21,17 @@ @@ -18879,7 +19311,7 @@ index 71f4727..217419b 100644 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); #endif -@@ -207,6 +209,11 @@ void __init setup_per_cpu_areas(void) +@@ -219,6 +221,11 @@ void __init setup_per_cpu_areas(void) /* alrighty, percpu areas up and running */ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) { @@ -18891,7 +19323,7 @@ index 71f4727..217419b 100644 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); per_cpu(cpu_number, cpu) = cpu; -@@ -247,6 +254,12 @@ void __init setup_per_cpu_areas(void) +@@ -259,6 +266,12 @@ void __init setup_per_cpu_areas(void) */ set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); #endif @@ -20334,7 +20766,7 @@ index e385214..f8df033 100644 local_irq_disable(); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c -index 3b4c8d8..f457b63 100644 +index a7a6f60..04b745a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1306,7 +1306,11 @@ static void reload_tss(void) @@ -20349,7 +20781,7 @@ index 3b4c8d8..f457b63 100644 load_TR_desc(); } -@@ -2631,8 +2635,11 @@ static __init int hardware_setup(void) +@@ -2637,8 +2641,11 @@ static __init int hardware_setup(void) if (!cpu_has_vmx_flexpriority()) flexpriority_enabled = 0; @@ -20363,7 +20795,7 @@ index 3b4c8d8..f457b63 100644 if (enable_ept && !cpu_has_vmx_ept_2m_page()) kvm_disable_largepages(); -@@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void) +@@ -3654,7 +3661,7 @@ static void vmx_set_constant_host_state(void) vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl)); @@ -20372,7 +20804,7 @@ index 3b4c8d8..f457b63 100644 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); vmcs_write32(HOST_IA32_SYSENTER_CS, low32); -@@ -6184,6 +6191,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) +@@ -6192,6 +6199,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "jmp .Lkvm_vmx_return \n\t" ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" ".Lkvm_vmx_return: " @@ -20385,7 +20817,7 @@ index 3b4c8d8..f457b63 100644 /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%"R"sp) \n\t" "pop %0 \n\t" -@@ -6232,6 +6245,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) +@@ -6240,6 +6253,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), [wordsize]"i"(sizeof(ulong)) @@ -20397,7 +20829,7 @@ index 3b4c8d8..f457b63 100644 : "cc", "memory" , R"ax", R"bx", R"di", R"si" #ifdef CONFIG_X86_64 -@@ -6260,7 +6278,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) +@@ -6268,7 +6286,16 
@@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) } } @@ -20416,7 +20848,7 @@ index 3b4c8d8..f457b63 100644 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 9cbfc06..943ffa6 100644 +index 8d1c6c6..6e6d611 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -873,6 +873,7 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) @@ -20461,7 +20893,7 @@ index 9cbfc06..943ffa6 100644 return -EINVAL; if (irqchip_in_kernel(vcpu->kvm)) return -ENXIO; -@@ -3497,6 +3501,9 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, +@@ -3499,6 +3503,9 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u32 access, @@ -20471,7 +20903,7 @@ index 9cbfc06..943ffa6 100644 struct x86_exception *exception) { void *data = val; -@@ -3528,6 +3535,9 @@ out: +@@ -3530,6 +3537,9 @@ out: /* used for instruction fetching */ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, @@ -20481,7 +20913,7 @@ index 9cbfc06..943ffa6 100644 struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); -@@ -3552,6 +3562,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_virt); +@@ -3554,6 +3564,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_virt); static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, @@ -20491,7 +20923,7 @@ index 9cbfc06..943ffa6 100644 struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); -@@ -3665,12 +3678,16 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) +@@ -3667,12 +3680,16 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) } static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, @@ -20508,7 +20940,7 @@ index 9cbfc06..943ffa6 100644 void *val, int bytes) { return emulator_write_phys(vcpu, gpa, val, bytes); -@@ -3821,6 +3838,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, +@@ -3823,6 +3840,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, const void *old, const void *new, unsigned int bytes, @@ -20521,7 +20953,7 @@ index 9cbfc06..943ffa6 100644 struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); -@@ -4780,7 +4803,7 @@ static void kvm_set_mmio_spte_mask(void) +@@ -4782,7 +4805,7 @@ static void kvm_set_mmio_spte_mask(void) kvm_mmu_set_mmio_spte_mask(mask); } @@ -20906,7 +21338,7 @@ index e8e7e0d..56fd1b0 100644 movl %eax, (v) movl %edx, 4(v) diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S -index 391a083..d658e9f 100644 +index 391a083..3a2cf39 100644 --- a/arch/x86/lib/atomic64_cx8_32.S +++ b/arch/x86/lib/atomic64_cx8_32.S @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8) @@ -21017,7 +21449,7 @@ index 391a083..d658e9f 100644 -.macro incdec_return func ins insc -ENTRY(atomic64_\func\()_return_cx8) -+.macro incdec_return func ins insc unchecked ++.macro incdec_return func ins insc unchecked="" +ENTRY(atomic64_\func\()_return\unchecked\()_cx8) CFI_STARTPROC SAVE ebx @@ -24310,7 +24742,7 @@ index f4f29b1..5cac4fb 100644 return (void *)vaddr; diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c -index 8ecbb4b..29efd37 100644 +index 8ecbb4b..a269cab 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct 
file *file, @@ -24386,7 +24818,7 @@ index 8ecbb4b..29efd37 100644 /* don't allow allocations above current base */ if (mm->free_area_cache > base) -@@ -321,66 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, +@@ -321,14 +328,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, largest_hole = 0; mm->free_area_cache = base; } @@ -24401,16 +24833,10 @@ index 8ecbb4b..29efd37 100644 + addr = (mm->free_area_cache - len); do { + addr &= huge_page_mask(h); -+ vma = find_vma(mm, addr); /* * Lookup failure means no vma is above this address, * i.e. return with success: -- */ -- vma = find_vma(mm, addr); -- if (!vma) -- return addr; -- -- /* +@@ -341,46 +349,47 @@ try_again: * new region fits between prev_vma->vm_end and * vma->vm_start, use it: */ @@ -24483,7 +24909,7 @@ index 8ecbb4b..29efd37 100644 mm->cached_hole_size = ~0UL; addr = hugetlb_get_unmapped_area_bottomup(file, addr0, len, pgoff, flags); -@@ -388,6 +392,7 @@ fail: +@@ -388,6 +397,7 @@ fail: /* * Restore the topdown base: */ @@ -24491,7 +24917,7 @@ index 8ecbb4b..29efd37 100644 mm->free_area_cache = base; mm->cached_hole_size = ~0UL; -@@ -401,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, +@@ -401,10 +411,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; @@ -24512,7 +24938,7 @@ index 8ecbb4b..29efd37 100644 return -ENOMEM; if (flags & MAP_FIXED) { -@@ -416,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, +@@ -416,8 +435,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); @@ -24940,7 +25366,7 @@ index 8663f6c..829ae76 100644 printk(KERN_INFO "Write protecting the kernel text: %luk\n", size >> 10); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c -index 436a030..2b60088 100644 +index 436a030..4f97ffc 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on); @@ -25057,6 +25483,15 @@ index 436a030..2b60088 100644 adr = (void *)(((unsigned long)adr) | left); return adr; +@@ -546,7 +560,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, + unmap_low_page(pmd); + + spin_lock(&init_mm.page_table_lock); +- pud_populate(&init_mm, pud, __va(pmd_phys)); ++ pud_populate_kernel(&init_mm, pud, __va(pmd_phys)); + spin_unlock(&init_mm.page_table_lock); + } + __flush_tlb_all(); @@ -592,7 +606,7 @@ kernel_physical_mapping_init(unsigned long start, unmap_low_page(pud); @@ -26837,10 +27272,10 @@ index 153407c..611cba9 100644 -} -__setup("vdso=", vdso_setup); diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c -index 4172af8..2c8ed7f 100644 +index 4e517d4..68a48f5 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c -@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info); +@@ -86,8 +86,6 @@ EXPORT_SYMBOL_GPL(xen_start_info); struct shared_info xen_dummy_shared_info; @@ -26849,7 +27284,7 @@ index 4172af8..2c8ed7f 100644 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE); __read_mostly int xen_have_vector_callback; EXPORT_SYMBOL_GPL(xen_have_vector_callback); -@@ -1029,30 +1027,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = { +@@ -1030,30 +1028,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = { #endif }; @@ -26887,7 +27322,7 @@ index 4172af8..2c8ed7f 100644 { if 
(pm_power_off) pm_power_off(); -@@ -1155,7 +1153,17 @@ asmlinkage void __init xen_start_kernel(void) +@@ -1156,7 +1154,17 @@ asmlinkage void __init xen_start_kernel(void) __userpte_alloc_gfp &= ~__GFP_HIGHMEM; /* Work out if we support NX */ @@ -26906,7 +27341,7 @@ index 4172af8..2c8ed7f 100644 xen_setup_features(); -@@ -1186,13 +1194,6 @@ asmlinkage void __init xen_start_kernel(void) +@@ -1187,13 +1195,6 @@ asmlinkage void __init xen_start_kernel(void) machine_ops = xen_machine_ops; @@ -26921,10 +27356,10 @@ index 4172af8..2c8ed7f 100644 #ifdef CONFIG_ACPI_NUMA diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c -index 95c1cf6..4bfa5be 100644 +index dc19347..1b07a2c 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c -@@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, +@@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, convert_pfn_mfn(init_level4_pgt); convert_pfn_mfn(level3_ident_pgt); convert_pfn_mfn(level3_kernel_pgt); @@ -26934,7 +27369,7 @@ index 95c1cf6..4bfa5be 100644 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); -@@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, +@@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); @@ -26946,7 +27381,7 @@ index 95c1cf6..4bfa5be 100644 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); -@@ -1958,6 +1965,7 @@ static void __init xen_post_allocator_init(void) +@@ -1963,6 +1970,7 @@ static void __init xen_post_allocator_init(void) pv_mmu_ops.set_pud = xen_set_pud; #if PAGETABLE_LEVELS == 4 pv_mmu_ops.set_pgd = xen_set_pgd; @@ -26954,7 +27389,7 @@ index 95c1cf6..4bfa5be 100644 #endif /* This will work as long as patching hasn't happened yet -@@ -2039,6 +2047,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { +@@ -2044,6 +2052,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .pud_val = PV_CALLEE_SAVE(xen_pud_val), .make_pud = PV_CALLEE_SAVE(xen_make_pud), .set_pgd = xen_set_pgd_hyper, @@ -46851,10 +47286,10 @@ index 5698746..6086012 100644 kfree(s); } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c -index 3645cd3..786809c 100644 +index c60267e..193d9e4 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c -@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs_fs_type = { +@@ -902,7 +902,7 @@ static struct file_system_type hugetlbfs_fs_type = { .kill_sb = kill_litter_super, }; @@ -47597,7 +48032,7 @@ index f649fba..236bf92 100644 void nfs_fattr_init(struct nfs_fattr *fattr) diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c -index edf6d3e..bdd1da7 100644 +index b96fe94..a4dbece 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -925,7 +925,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, @@ -49831,10 +50266,10 @@ index ab30253..4d86958 100644 kfree(s); diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig new file mode 100644 -index 0000000..4089e05 +index 0000000..2645296 --- /dev/null +++ b/grsecurity/Kconfig -@@ -0,0 +1,1078 @@ +@@ -0,0 +1,1079 @@ +# +# grecurity configuration +# @@ -49969,7 +50404,7 @@ index 0000000..4089e05 + select GRKERNSEC_PROC_ADD + select GRKERNSEC_CHROOT_CHMOD + select GRKERNSEC_CHROOT_NICE -+ select GRKERNSEC_SETXID ++ select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS) + select 
GRKERNSEC_AUDIT_MOUNT + select GRKERNSEC_MODHARDEN if (MODULES) + select GRKERNSEC_HARDEN_PTRACE @@ -50664,6 +51099,7 @@ index 0000000..4089e05 + +config GRKERNSEC_SETXID + bool "Enforce consistent multithreaded privileges" ++ depends on (X86 || SPARC64 || PPC || ARM || MIPS) + help + If you say Y here, a change from a root uid to a non-root uid + in a multithreaded application will cause the resulting uids, @@ -50959,10 +51395,10 @@ index 0000000..1b9afa9 +endif diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c new file mode 100644 -index 0000000..42813ac +index 0000000..a6d83f0 --- /dev/null +++ b/grsecurity/gracl.c -@@ -0,0 +1,4192 @@ +@@ -0,0 +1,4193 @@ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> @@ -54820,21 +55256,22 @@ index 0000000..42813ac + if (unlikely(!(gr_status & GR_READY))) + return 0; +#endif ++ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { ++ read_lock(&tasklist_lock); ++ while (tmp->pid > 0) { ++ if (tmp == curtemp) ++ break; ++ tmp = tmp->real_parent; ++ } + -+ read_lock(&tasklist_lock); -+ while (tmp->pid > 0) { -+ if (tmp == curtemp) -+ break; -+ tmp = tmp->real_parent; -+ } -+ -+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) || -+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) { ++ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) || ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) { ++ read_unlock(&tasklist_lock); ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } + read_unlock(&tasklist_lock); -+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); -+ return 1; + } -+ read_unlock(&tasklist_lock); + +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE + if (!(gr_status & GR_READY)) @@ -62544,7 +62981,7 @@ index 9c07dce..a92fa71 100644 if (atomic_sub_and_test((int) count, &kref->refcount)) { release(kref); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index bc21720..098aefa 100644 +index 4c4e83d..5f16617 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -326,7 +326,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); @@ -63114,7 +63551,7 @@ index ffc0213..2c1f2cb 100644 return nd->saved_names[nd->depth]; } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h -index 4f3b01a..8256d1a 100644 +index 7e472b7..212d381 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1002,6 +1002,7 @@ struct net_device_ops { @@ -66076,7 +66513,7 @@ index 42e8fa0..9e7406b 100644 return -ENOMEM; diff --git a/kernel/cred.c b/kernel/cred.c -index 48c6fd3..3342f00 100644 +index 48c6fd3..8398912 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk) @@ -66113,7 +66550,7 @@ index 48c6fd3..3342f00 100644 /* dumpability changes */ if (old->euid != new->euid || old->egid != new->egid || -@@ -540,6 +551,92 @@ int commit_creds(struct cred *new) +@@ -540,6 +551,101 @@ int commit_creds(struct cred *new) put_cred(old); return 0; } @@ -66179,6 +66616,8 @@ index 48c6fd3..3342f00 100644 +int commit_creds(struct cred *new) +{ +#ifdef CONFIG_GRKERNSEC_SETXID ++ int ret; ++ int schedule_it = 0; + struct task_struct *t; + + /* we won't get called with tasklist_lock held for writing @@ -66187,20 +66626,27 @@ index 48c6fd3..3342f00 100644 + */ + if (grsec_enable_setxid && !current_is_single_threaded() && + !current_uid() && new->uid) { ++ schedule_it = 1; ++ } ++ ret = 
__commit_creds(new); ++ if (schedule_it) { + rcu_read_lock(); + read_lock(&tasklist_lock); + for (t = next_thread(current); t != current; + t = next_thread(t)) { + if (t->delayed_cred == NULL) { + t->delayed_cred = get_cred(new); ++ set_tsk_thread_flag(t, TIF_GRSEC_SETXID); + set_tsk_need_resched(t); + } + } + read_unlock(&tasklist_lock); + rcu_read_unlock(); + } -+#endif ++ return ret; ++#else + return __commit_creds(new); ++#endif +} + EXPORT_SYMBOL(commit_creds); @@ -69073,39 +69519,10 @@ index e8a1f83..363d17d 100644 #ifdef CONFIG_RT_GROUP_SCHED /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 478a04c..6970d99 100644 +index 478a04c..e16339a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -3142,6 +3142,19 @@ pick_next_task(struct rq *rq) - BUG(); /* the idle class will always have a runnable task */ - } - -+#ifdef CONFIG_GRKERNSEC_SETXID -+extern void gr_delayed_cred_worker(void); -+static inline void gr_cred_schedule(void) -+{ -+ if (unlikely(current->delayed_cred)) -+ gr_delayed_cred_worker(); -+} -+#else -+static inline void gr_cred_schedule(void) -+{ -+} -+#endif -+ - /* - * __schedule() is the main scheduler function. - */ -@@ -3161,6 +3174,8 @@ need_resched: - - schedule_debug(prev); - -+ gr_cred_schedule(); -+ - if (sched_feat(HRTICK)) - hrtick_clear(rq); - -@@ -3851,6 +3866,8 @@ int can_nice(const struct task_struct *p, const int nice) +@@ -3851,6 +3851,8 @@ int can_nice(const struct task_struct *p, const int nice) /* convert nice value [19,-20] to rlimit style value [1,40] */ int nice_rlim = 20 - nice; @@ -69114,7 +69531,7 @@ index 478a04c..6970d99 100644 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || capable(CAP_SYS_NICE)); } -@@ -3884,7 +3901,8 @@ SYSCALL_DEFINE1(nice, int, increment) +@@ -3884,7 +3886,8 @@ SYSCALL_DEFINE1(nice, int, increment) if (nice > 19) nice = 19; @@ -69124,7 +69541,7 @@ index 478a04c..6970d99 100644 return -EPERM; retval = security_task_setnice(current, nice); -@@ -4041,6 +4059,7 @@ recheck: +@@ -4041,6 +4044,7 @@ recheck: unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); @@ -70448,6 +70865,28 @@ index 013a761..c28f3fc 100644 #define free(a) kfree(a) #endif +diff --git a/lib/ioremap.c b/lib/ioremap.c +index da4e2ad..6373b5f 100644 +--- a/lib/ioremap.c ++++ b/lib/ioremap.c +@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, + unsigned long next; + + phys_addr -= addr; +- pmd = pmd_alloc(&init_mm, pud, addr); ++ pmd = pmd_alloc_kernel(&init_mm, pud, addr); + if (!pmd) + return -ENOMEM; + do { +@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, + unsigned long next; + + phys_addr -= addr; +- pud = pud_alloc(&init_mm, pgd, addr); ++ pud = pud_alloc_kernel(&init_mm, pgd, addr); + if (!pud) + return -ENOMEM; + do { diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c index bd2bea9..6b3c95e 100644 --- a/lib/is_single_threaded.c @@ -70677,10 +71116,10 @@ index 8f7fc39..69bf1e9 100644 /* if an huge pmd materialized from under us just retry later */ if (unlikely(pmd_trans_huge(*pmd))) diff --git a/mm/hugetlb.c b/mm/hugetlb.c -index a7cf829..d60e0e1 100644 +index 24b1787..e0fbc01 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c -@@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -2425,6 +2425,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, return 1; } @@ -70708,7 +71147,7 @@ index a7cf829..d60e0e1 100644 /* * Hugetlb_cow() should be called with page 
lock of the original hugepage held. * Called with hugetlb_instantiation_mutex held and pte_page locked so we -@@ -2459,6 +2480,11 @@ retry_avoidcopy: +@@ -2538,6 +2559,11 @@ retry_avoidcopy: make_huge_pte(vma, new_page, 1)); page_remove_rmap(old_page); hugepage_add_new_anon_rmap(new_page, vma, address); @@ -70720,7 +71159,7 @@ index a7cf829..d60e0e1 100644 /* Make the old page be freed below */ new_page = old_page; mmu_notifier_invalidate_range_end(mm, -@@ -2613,6 +2639,10 @@ retry: +@@ -2692,6 +2718,10 @@ retry: && (vma->vm_flags & VM_SHARED))); set_huge_pte_at(mm, address, ptep, new_pte); @@ -70731,7 +71170,7 @@ index a7cf829..d60e0e1 100644 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { /* Optimization, do the COW without a second fault */ ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); -@@ -2642,6 +2672,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -2721,6 +2751,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, static DEFINE_MUTEX(hugetlb_instantiation_mutex); struct hstate *h = hstate_vma(vma); @@ -70742,7 +71181,7 @@ index a7cf829..d60e0e1 100644 address &= huge_page_mask(h); ptep = huge_pte_offset(mm, address); -@@ -2655,6 +2689,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -2734,6 +2768,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, VM_FAULT_SET_HINDEX(h - hstates); } @@ -70982,7 +71421,7 @@ index 56080ea..115071e 100644 /* keep elevated page count for bad page */ return ret; diff --git a/mm/memory.c b/mm/memory.c -index 10b4dda..b1f60ad 100644 +index 10b4dda..06857f3 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, @@ -71109,7 +71548,29 @@ index 10b4dda..b1f60ad 100644 if (addr < vma->vm_start || addr >= vma->vm_end) return -EFAULT; -@@ -2472,6 +2485,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo +@@ -2364,7 +2377,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, + + BUG_ON(pud_huge(*pud)); + +- pmd = pmd_alloc(mm, pud, addr); ++ pmd = (mm == &init_mm) ? ++ pmd_alloc_kernel(mm, pud, addr) : ++ pmd_alloc(mm, pud, addr); + if (!pmd) + return -ENOMEM; + do { +@@ -2384,7 +2399,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, + unsigned long next; + int err; + +- pud = pud_alloc(mm, pgd, addr); ++ pud = (mm == &init_mm) ? ++ pud_alloc_kernel(mm, pgd, addr) : ++ pud_alloc(mm, pgd, addr); + if (!pud) + return -ENOMEM; + do { +@@ -2472,6 +2489,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo copy_user_highpage(dst, src, va, vma); } @@ -71296,7 +71757,7 @@ index 10b4dda..b1f60ad 100644 /* * This routine handles present pages, when users try to write * to a shared page. It is done by copying the page to a new address -@@ -2683,6 +2876,12 @@ gotten: +@@ -2683,6 +2880,12 @@ gotten: */ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (likely(pte_same(*page_table, orig_pte))) { @@ -71309,7 +71770,7 @@ index 10b4dda..b1f60ad 100644 if (old_page) { if (!PageAnon(old_page)) { dec_mm_counter_fast(mm, MM_FILEPAGES); -@@ -2734,6 +2933,10 @@ gotten: +@@ -2734,6 +2937,10 @@ gotten: page_remove_rmap(old_page); } @@ -71320,7 +71781,7 @@ index 10b4dda..b1f60ad 100644 /* Free the old page.. 
*/ new_page = old_page; ret |= VM_FAULT_WRITE; -@@ -3013,6 +3216,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3013,6 +3220,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, swap_free(entry); if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) try_to_free_swap(page); @@ -71332,7 +71793,7 @@ index 10b4dda..b1f60ad 100644 unlock_page(page); if (swapcache) { /* -@@ -3036,6 +3244,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3036,6 +3248,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, page_table); @@ -71344,7 +71805,7 @@ index 10b4dda..b1f60ad 100644 unlock: pte_unmap_unlock(page_table, ptl); out: -@@ -3055,40 +3268,6 @@ out_release: +@@ -3055,40 +3272,6 @@ out_release: } /* @@ -71385,7 +71846,7 @@ index 10b4dda..b1f60ad 100644 * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_sem still held, but pte unmapped and unlocked. -@@ -3097,27 +3276,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3097,27 +3280,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags) { @@ -71418,7 +71879,7 @@ index 10b4dda..b1f60ad 100644 if (unlikely(anon_vma_prepare(vma))) goto oom; page = alloc_zeroed_user_highpage_movable(vma, address); -@@ -3136,6 +3311,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3136,6 +3315,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, if (!pte_none(*page_table)) goto release; @@ -71430,7 +71891,7 @@ index 10b4dda..b1f60ad 100644 inc_mm_counter_fast(mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, address); setpte: -@@ -3143,6 +3323,12 @@ setpte: +@@ -3143,6 +3327,12 @@ setpte: /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, page_table); @@ -71443,7 +71904,7 @@ index 10b4dda..b1f60ad 100644 unlock: pte_unmap_unlock(page_table, ptl); return 0; -@@ -3286,6 +3472,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3286,6 +3476,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, */ /* Only go through if we didn't race with anybody else... 
*/ if (likely(pte_same(*page_table, orig_pte))) { @@ -71456,7 +71917,7 @@ index 10b4dda..b1f60ad 100644 flush_icache_page(vma, page); entry = mk_pte(page, vma->vm_page_prot); if (flags & FAULT_FLAG_WRITE) -@@ -3305,6 +3497,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3305,6 +3501,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, /* no need to invalidate: a not-present page won't be cached */ update_mmu_cache(vma, address, page_table); @@ -71471,7 +71932,7 @@ index 10b4dda..b1f60ad 100644 } else { if (cow_page) mem_cgroup_uncharge_page(cow_page); -@@ -3458,6 +3658,12 @@ int handle_pte_fault(struct mm_struct *mm, +@@ -3458,6 +3662,12 @@ int handle_pte_fault(struct mm_struct *mm, if (flags & FAULT_FLAG_WRITE) flush_tlb_fix_spurious_fault(vma, address); } @@ -71484,7 +71945,7 @@ index 10b4dda..b1f60ad 100644 unlock: pte_unmap_unlock(pte, ptl); return 0; -@@ -3474,6 +3680,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3474,6 +3684,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd; pte_t *pte; @@ -71495,7 +71956,7 @@ index 10b4dda..b1f60ad 100644 __set_current_state(TASK_RUNNING); count_vm_event(PGFAULT); -@@ -3485,6 +3695,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3485,6 +3699,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(is_vm_hugetlb_page(vma))) return hugetlb_fault(mm, vma, address, flags); @@ -71530,7 +71991,7 @@ index 10b4dda..b1f60ad 100644 pgd = pgd_offset(mm, address); pud = pud_alloc(mm, pgd, address); if (!pud) -@@ -3514,7 +3752,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -3514,7 +3756,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, * run pte_offset_map on the pmd, if an huge pmd could * materialize from under us from a different thread. 
*/ @@ -71539,7 +72000,7 @@ index 10b4dda..b1f60ad 100644 return VM_FAULT_OOM; /* if an huge pmd materialized from under us just retry later */ if (unlikely(pmd_trans_huge(*pmd))) -@@ -3551,6 +3789,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) +@@ -3551,6 +3793,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) spin_unlock(&mm->page_table_lock); return 0; } @@ -71563,7 +72024,7 @@ index 10b4dda..b1f60ad 100644 #endif /* __PAGETABLE_PUD_FOLDED */ #ifndef __PAGETABLE_PMD_FOLDED -@@ -3581,6 +3836,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) +@@ -3581,6 +3840,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) spin_unlock(&mm->page_table_lock); return 0; } @@ -71594,7 +72055,7 @@ index 10b4dda..b1f60ad 100644 #endif /* __PAGETABLE_PMD_FOLDED */ int make_pages_present(unsigned long addr, unsigned long end) -@@ -3618,7 +3897,7 @@ static int __init gate_vma_init(void) +@@ -3618,7 +3901,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; @@ -75428,7 +75889,7 @@ index 68bbf9f..5ef0d12 100644 return err; diff --git a/net/core/dev.c b/net/core/dev.c -index 7f72c9c..e29943b 100644 +index 0336374..659088a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1138,10 +1138,14 @@ void dev_load(struct net *net, const char *name) @@ -75446,7 +75907,7 @@ index 7f72c9c..e29943b 100644 } } EXPORT_SYMBOL(dev_load); -@@ -1585,7 +1589,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) +@@ -1605,7 +1609,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { if (skb_copy_ubufs(skb, GFP_ATOMIC)) { @@ -75455,7 +75916,7 @@ index 7f72c9c..e29943b 100644 kfree_skb(skb); return NET_RX_DROP; } -@@ -1595,7 +1599,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) +@@ -1615,7 +1619,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) nf_reset(skb); if (unlikely(!is_skb_forwardable(dev, skb))) { @@ -75464,7 +75925,7 @@ index 7f72c9c..e29943b 100644 kfree_skb(skb); return NET_RX_DROP; } -@@ -2057,7 +2061,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) +@@ -2077,7 +2081,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) struct dev_gso_cb { void (*destructor)(struct sk_buff *skb); @@ -75473,7 +75934,7 @@ index 7f72c9c..e29943b 100644 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) -@@ -2913,7 +2917,7 @@ enqueue: +@@ -2933,7 +2937,7 @@ enqueue: local_irq_restore(flags); @@ -75482,7 +75943,7 @@ index 7f72c9c..e29943b 100644 kfree_skb(skb); return NET_RX_DROP; } -@@ -2985,7 +2989,7 @@ int netif_rx_ni(struct sk_buff *skb) +@@ -3005,7 +3009,7 @@ int netif_rx_ni(struct sk_buff *skb) } EXPORT_SYMBOL(netif_rx_ni); @@ -75491,7 +75952,7 @@ index 7f72c9c..e29943b 100644 { struct softnet_data *sd = &__get_cpu_var(softnet_data); -@@ -3273,7 +3277,7 @@ ncls: +@@ -3293,7 +3297,7 @@ ncls: if (pt_prev) { ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } else { @@ -75500,7 +75961,7 @@ index 7f72c9c..e29943b 100644 kfree_skb(skb); /* Jamal, now you will not able to escape explaining * me how you were going to use this. 
:-) -@@ -3833,7 +3837,7 @@ void netif_napi_del(struct napi_struct *napi) +@@ -3853,7 +3857,7 @@ void netif_napi_del(struct napi_struct *napi) } EXPORT_SYMBOL(netif_napi_del); @@ -75509,7 +75970,7 @@ index 7f72c9c..e29943b 100644 { struct softnet_data *sd = &__get_cpu_var(softnet_data); unsigned long time_limit = jiffies + 2; -@@ -5858,7 +5862,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, +@@ -5878,7 +5882,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, } else { netdev_stats_to_stats64(storage, &dev->stats); } @@ -86454,7 +86915,7 @@ index af0f22f..9a7d479 100644 break; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c -index c4ac57e..527711d 100644 +index 7858228..2919715 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -75,7 +75,7 @@ LIST_HEAD(vm_list); @@ -86466,7 +86927,7 @@ index c4ac57e..527711d 100644 struct kmem_cache *kvm_vcpu_cache; EXPORT_SYMBOL_GPL(kvm_vcpu_cache); -@@ -2313,7 +2313,7 @@ static void hardware_enable_nolock(void *junk) +@@ -2318,7 +2318,7 @@ static void hardware_enable_nolock(void *junk) if (r) { cpumask_clear_cpu(cpu, cpus_hardware_enabled); @@ -86475,7 +86936,7 @@ index c4ac57e..527711d 100644 printk(KERN_INFO "kvm: enabling virtualization on " "CPU%d failed\n", cpu); } -@@ -2367,10 +2367,10 @@ static int hardware_enable_all(void) +@@ -2372,10 +2372,10 @@ static int hardware_enable_all(void) kvm_usage_count++; if (kvm_usage_count == 1) { @@ -86488,7 +86949,7 @@ index c4ac57e..527711d 100644 hardware_disable_all_nolock(); r = -EBUSY; } -@@ -2733,7 +2733,7 @@ static void kvm_sched_out(struct preempt_notifier *pn, +@@ -2738,7 +2738,7 @@ static void kvm_sched_out(struct preempt_notifier *pn, kvm_arch_vcpu_put(vcpu); } @@ -86497,7 +86958,7 @@ index c4ac57e..527711d 100644 struct module *module) { int r; -@@ -2796,7 +2796,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, +@@ -2801,7 +2801,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, if (!vcpu_align) vcpu_align = __alignof__(struct kvm_vcpu); kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, @@ -86506,7 +86967,7 @@ index c4ac57e..527711d 100644 if (!kvm_vcpu_cache) { r = -ENOMEM; goto out_free_3; -@@ -2806,9 +2806,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, +@@ -2811,9 +2811,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, if (r) goto out_free; |