-rw-r--r--  main/linux-grsec/APKBUILD                                                                                                             |   10
-rw-r--r--  main/linux-grsec/grsecurity-2.9.1-3.6.7-201211181105.patch (renamed from main/linux-grsec/grsecurity-2.9.1-3.6.6-201211122213.patch)  | 1067
2 files changed, 550 insertions(+), 527 deletions(-)
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index ec1f358aad..ec40286cac 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -2,9 +2,9 @@
_flavor=grsec
pkgname=linux-${_flavor}
-pkgver=3.6.6
+pkgver=3.6.7
_kernver=3.6
-pkgrel=2
+pkgrel=0
pkgdesc="Linux kernel with grsecurity"
url=http://grsecurity.net
depends="mkinitfs linux-firmware"
@@ -14,7 +14,7 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
- grsecurity-2.9.1-3.6.6-201211122213.patch
+ grsecurity-2.9.1-3.6.7-201211181105.patch
0004-arp-flush-arp-cache-on-device-change.patch
@@ -139,8 +139,8 @@ dev() {
}
md5sums="1a1760420eac802c541a20ab51a093d1 linux-3.6.tar.xz
-11d6d8749d4612a77f43f0531c0f2824 patch-3.6.6.xz
-eeadecd13caac8e41b567b21b5d42c80 grsecurity-2.9.1-3.6.6-201211122213.patch
+134936c362d8812b5cafcf3c67afdce0 patch-3.6.7.xz
+af1f2097a6e26d36801188193d3eb185 grsecurity-2.9.1-3.6.7-201211181105.patch
776adeeb5272093574f8836c5037dd7d 0004-arp-flush-arp-cache-on-device-change.patch
0b4abb6b3e32cc7ba656c24e30581349 kernelconfig.x86
0971129c59c7fe0011b3ec46982d9f5c kernelconfig.x86_64"
diff --git a/main/linux-grsec/grsecurity-2.9.1-3.6.6-201211122213.patch b/main/linux-grsec/grsecurity-2.9.1-3.6.7-201211181105.patch
index 164e8e9168..6f0229a48a 100644
--- a/main/linux-grsec/grsecurity-2.9.1-3.6.6-201211122213.patch
+++ b/main/linux-grsec/grsecurity-2.9.1-3.6.7-201211181105.patch
@@ -251,7 +251,7 @@ index ad7e2e5..199f49e 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 471b83c..a290aa2 100644
+index 07f2308..7271d99 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -5692,6 +5692,26 @@ index 6cf591b..b49e65a 100644
extra-y := head_$(BITS).o
+diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
+index f8b6eee..87f60ee 100644
+--- a/arch/sparc/kernel/leon_kernel.c
++++ b/arch/sparc/kernel/leon_kernel.c
+@@ -56,11 +56,13 @@ static inline unsigned int leon_eirq_get(int cpu)
+ static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
+ {
+ unsigned int eirq;
++ struct irq_bucket *p;
+ int cpu = sparc_leon3_cpuid();
+
+ eirq = leon_eirq_get(cpu);
+- if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */
+- generic_handle_irq(irq_map[eirq]->irq);
++ p = irq_map[eirq];
++ if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */
++ generic_handle_irq(p->irq);
+ }
+
+ /* The extended IRQ controller has been found, this function registers it */
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 14006d8..8146238 100644
--- a/arch/sparc/kernel/process_32.c
@@ -15787,7 +15807,7 @@ index 8f8e8ee..3617d6e 100644
/*
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index dcdd0ea..de0bb2d 100644
+index dcdd0ea..8f32835 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -57,6 +57,8 @@
@@ -15863,7 +15883,7 @@ index dcdd0ea..de0bb2d 100644
jmp *%rdi
#endif
-@@ -180,6 +188,280 @@ ENTRY(native_usergs_sysret64)
+@@ -180,6 +188,273 @@ ENTRY(native_usergs_sysret64)
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
@@ -15949,13 +15969,6 @@ index dcdd0ea..de0bb2d 100644
+ ljmpq __KERNEL_CS,3f
+3: SET_RDI_INTO_CR0
+ jmp 1b
-+#ifdef CONFIG_PARAVIRT
-+ PV_RESTORE_REGS(CLBR_RDI);
-+#endif
-+
-+ popq %rdi
-+ pax_force_retaddr
-+ retq
+ENDPROC(pax_exit_kernel)
+#endif
+
@@ -16144,7 +16157,7 @@ index dcdd0ea..de0bb2d 100644
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
-@@ -271,8 +553,8 @@ ENDPROC(native_usergs_sysret64)
+@@ -271,8 +546,8 @@ ENDPROC(native_usergs_sysret64)
.endm
.macro UNFAKE_STACK_FRAME
@@ -16155,7 +16168,7 @@ index dcdd0ea..de0bb2d 100644
.endm
/*
-@@ -359,7 +641,7 @@ ENDPROC(native_usergs_sysret64)
+@@ -359,7 +634,7 @@ ENDPROC(native_usergs_sysret64)
movq %rsp, %rsi
leaq -RBP(%rsp),%rdi /* arg1 for handler */
@@ -16164,7 +16177,7 @@ index dcdd0ea..de0bb2d 100644
je 1f
SWAPGS
/*
-@@ -394,9 +676,10 @@ ENTRY(save_rest)
+@@ -394,9 +669,10 @@ ENTRY(save_rest)
movq_cfi r15, R15+16
movq %r11, 8(%rsp) /* return address */
FIXUP_TOP_OF_STACK %r11, 16
@@ -16176,7 +16189,7 @@ index dcdd0ea..de0bb2d 100644
/* save complete stack frame */
.pushsection .kprobes.text, "ax"
-@@ -425,9 +708,10 @@ ENTRY(save_paranoid)
+@@ -425,9 +701,10 @@ ENTRY(save_paranoid)
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx,%ebx
@@ -16189,7 +16202,7 @@ index dcdd0ea..de0bb2d 100644
.popsection
/*
-@@ -449,7 +733,7 @@ ENTRY(ret_from_fork)
+@@ -449,7 +726,7 @@ ENTRY(ret_from_fork)
RESTORE_REST
@@ -16198,7 +16211,7 @@ index dcdd0ea..de0bb2d 100644
jz retint_restore_args
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
-@@ -459,7 +743,7 @@ ENTRY(ret_from_fork)
+@@ -459,7 +736,7 @@ ENTRY(ret_from_fork)
jmp ret_from_sys_call # go to the SYSRET fastpath
CFI_ENDPROC
@@ -16207,7 +16220,7 @@ index dcdd0ea..de0bb2d 100644
/*
* System call entry. Up to 6 arguments in registers are supported.
-@@ -495,7 +779,7 @@ END(ret_from_fork)
+@@ -495,7 +772,7 @@ END(ret_from_fork)
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
@@ -16216,7 +16229,7 @@ index dcdd0ea..de0bb2d 100644
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
-@@ -508,16 +792,23 @@ GLOBAL(system_call_after_swapgs)
+@@ -508,16 +785,23 @@ GLOBAL(system_call_after_swapgs)
movq %rsp,PER_CPU_VAR(old_rsp)
movq PER_CPU_VAR(kernel_stack),%rsp
@@ -16242,7 +16255,7 @@ index dcdd0ea..de0bb2d 100644
jnz tracesys
system_call_fastpath:
#if __SYSCALL_MASK == ~0
-@@ -527,7 +818,7 @@ system_call_fastpath:
+@@ -527,7 +811,7 @@ system_call_fastpath:
cmpl $__NR_syscall_max,%eax
#endif
ja badsys
@@ -16251,7 +16264,7 @@ index dcdd0ea..de0bb2d 100644
call *sys_call_table(,%rax,8) # XXX: rip relative
movq %rax,RAX-ARGOFFSET(%rsp)
/*
-@@ -541,10 +832,13 @@ sysret_check:
+@@ -541,10 +825,13 @@ sysret_check:
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -16266,7 +16279,7 @@ index dcdd0ea..de0bb2d 100644
/*
* sysretq will re-enable interrupts:
*/
-@@ -596,14 +890,18 @@ badsys:
+@@ -596,14 +883,18 @@ badsys:
* jump back to the normal fast path.
*/
auditsys:
@@ -16286,7 +16299,7 @@ index dcdd0ea..de0bb2d 100644
jmp system_call_fastpath
/*
-@@ -624,7 +922,7 @@ sysret_audit:
+@@ -624,7 +915,7 @@ sysret_audit:
/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -16295,7 +16308,7 @@ index dcdd0ea..de0bb2d 100644
jz auditsys
#endif
SAVE_REST
-@@ -632,12 +930,16 @@ tracesys:
+@@ -632,12 +923,16 @@ tracesys:
FIXUP_TOP_OF_STACK %rdi
movq %rsp,%rdi
call syscall_trace_enter
@@ -16312,7 +16325,7 @@ index dcdd0ea..de0bb2d 100644
RESTORE_REST
#if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max,%rax
-@@ -646,7 +948,7 @@ tracesys:
+@@ -646,7 +941,7 @@ tracesys:
cmpl $__NR_syscall_max,%eax
#endif
ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
@@ -16321,7 +16334,7 @@ index dcdd0ea..de0bb2d 100644
call *sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
/* Use IRET because user could have changed frame */
-@@ -667,7 +969,9 @@ GLOBAL(int_with_check)
+@@ -667,7 +962,9 @@ GLOBAL(int_with_check)
andl %edi,%edx
jnz int_careful
andl $~TS_COMPAT,TI_status(%rcx)
@@ -16332,7 +16345,7 @@ index dcdd0ea..de0bb2d 100644
/* Either reschedule or signal or syscall exit tracking needed. */
/* First do a reschedule test. */
-@@ -713,7 +1017,7 @@ int_restore_rest:
+@@ -713,7 +1010,7 @@ int_restore_rest:
TRACE_IRQS_OFF
jmp int_with_check
CFI_ENDPROC
@@ -16341,7 +16354,7 @@ index dcdd0ea..de0bb2d 100644
/*
* Certain special system calls that need to save a complete full stack frame.
-@@ -729,7 +1033,7 @@ ENTRY(\label)
+@@ -729,7 +1026,7 @@ ENTRY(\label)
call \func
jmp ptregscall_common
CFI_ENDPROC
@@ -16350,7 +16363,7 @@ index dcdd0ea..de0bb2d 100644
.endm
PTREGSCALL stub_clone, sys_clone, %r8
-@@ -747,9 +1051,10 @@ ENTRY(ptregscall_common)
+@@ -747,9 +1044,10 @@ ENTRY(ptregscall_common)
movq_cfi_restore R12+8, r12
movq_cfi_restore RBP+8, rbp
movq_cfi_restore RBX+8, rbx
@@ -16362,7 +16375,7 @@ index dcdd0ea..de0bb2d 100644
ENTRY(stub_execve)
CFI_STARTPROC
-@@ -764,7 +1069,7 @@ ENTRY(stub_execve)
+@@ -764,7 +1062,7 @@ ENTRY(stub_execve)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -16371,7 +16384,7 @@ index dcdd0ea..de0bb2d 100644
/*
* sigreturn is special because it needs to restore all registers on return.
-@@ -782,7 +1087,7 @@ ENTRY(stub_rt_sigreturn)
+@@ -782,7 +1080,7 @@ ENTRY(stub_rt_sigreturn)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -16380,7 +16393,7 @@ index dcdd0ea..de0bb2d 100644
#ifdef CONFIG_X86_X32_ABI
PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
-@@ -851,7 +1156,7 @@ vector=vector+1
+@@ -851,7 +1149,7 @@ vector=vector+1
2: jmp common_interrupt
.endr
CFI_ENDPROC
@@ -16389,7 +16402,7 @@ index dcdd0ea..de0bb2d 100644
.previous
END(interrupt)
-@@ -871,6 +1176,16 @@ END(interrupt)
+@@ -871,6 +1169,16 @@ END(interrupt)
subq $ORIG_RAX-RBP, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
SAVE_ARGS_IRQ
@@ -16406,7 +16419,7 @@ index dcdd0ea..de0bb2d 100644
call \func
.endm
-@@ -902,7 +1217,7 @@ ret_from_intr:
+@@ -902,7 +1210,7 @@ ret_from_intr:
exit_intr:
GET_THREAD_INFO(%rcx)
@@ -16415,7 +16428,7 @@ index dcdd0ea..de0bb2d 100644
je retint_kernel
/* Interrupt came from user space */
-@@ -924,12 +1239,16 @@ retint_swapgs: /* return to user-space */
+@@ -924,12 +1232,16 @@ retint_swapgs: /* return to user-space */
* The iretq could re-enable interrupts:
*/
DISABLE_INTERRUPTS(CLBR_ANY)
@@ -16432,7 +16445,7 @@ index dcdd0ea..de0bb2d 100644
/*
* The iretq could re-enable interrupts:
*/
-@@ -1012,7 +1331,7 @@ ENTRY(retint_kernel)
+@@ -1012,7 +1324,7 @@ ENTRY(retint_kernel)
#endif
CFI_ENDPROC
@@ -16441,7 +16454,7 @@ index dcdd0ea..de0bb2d 100644
/*
* End of kprobes section
*/
-@@ -1029,7 +1348,7 @@ ENTRY(\sym)
+@@ -1029,7 +1341,7 @@ ENTRY(\sym)
interrupt \do_sym
jmp ret_from_intr
CFI_ENDPROC
@@ -16450,7 +16463,7 @@ index dcdd0ea..de0bb2d 100644
.endm
#ifdef CONFIG_SMP
-@@ -1084,12 +1403,22 @@ ENTRY(\sym)
+@@ -1084,12 +1396,22 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
@@ -16474,7 +16487,7 @@ index dcdd0ea..de0bb2d 100644
.endm
.macro paranoidzeroentry sym do_sym
-@@ -1101,15 +1430,25 @@ ENTRY(\sym)
+@@ -1101,15 +1423,25 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF
@@ -16502,7 +16515,7 @@ index dcdd0ea..de0bb2d 100644
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
INTR_FRAME
-@@ -1119,14 +1458,30 @@ ENTRY(\sym)
+@@ -1119,14 +1451,30 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF_DEBUG
@@ -16534,7 +16547,7 @@ index dcdd0ea..de0bb2d 100644
.endm
.macro errorentry sym do_sym
-@@ -1137,13 +1492,23 @@ ENTRY(\sym)
+@@ -1137,13 +1485,23 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
@@ -16559,7 +16572,7 @@ index dcdd0ea..de0bb2d 100644
.endm
/* error code is on the stack already */
-@@ -1156,13 +1521,23 @@ ENTRY(\sym)
+@@ -1156,13 +1514,23 @@ ENTRY(\sym)
call save_paranoid
DEFAULT_FRAME 0
TRACE_IRQS_OFF
@@ -16584,7 +16597,7 @@ index dcdd0ea..de0bb2d 100644
.endm
zeroentry divide_error do_divide_error
-@@ -1192,9 +1567,10 @@ gs_change:
+@@ -1192,9 +1560,10 @@ gs_change:
2: mfence /* workaround */
SWAPGS
popfq_cfi
@@ -16596,7 +16609,7 @@ index dcdd0ea..de0bb2d 100644
_ASM_EXTABLE(gs_change,bad_gs)
.section .fixup,"ax"
-@@ -1213,13 +1589,14 @@ ENTRY(kernel_thread_helper)
+@@ -1213,13 +1582,14 @@ ENTRY(kernel_thread_helper)
* Here we are in the child and the registers are set as they were
* at kernel_thread() invocation in the parent.
*/
@@ -16612,7 +16625,7 @@ index dcdd0ea..de0bb2d 100644
/*
* execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
-@@ -1246,11 +1623,11 @@ ENTRY(kernel_execve)
+@@ -1246,11 +1616,11 @@ ENTRY(kernel_execve)
RESTORE_REST
testq %rax,%rax
je int_ret_from_sys_call
@@ -16626,7 +16639,7 @@ index dcdd0ea..de0bb2d 100644
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
-@@ -1268,9 +1645,10 @@ ENTRY(call_softirq)
+@@ -1268,9 +1638,10 @@ ENTRY(call_softirq)
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count)
@@ -16638,7 +16651,7 @@ index dcdd0ea..de0bb2d 100644
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
-@@ -1308,7 +1686,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+@@ -1308,7 +1679,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
decl PER_CPU_VAR(irq_count)
jmp error_exit
CFI_ENDPROC
@@ -16647,7 +16660,7 @@ index dcdd0ea..de0bb2d 100644
/*
* Hypervisor uses this for application faults while it executes.
-@@ -1367,7 +1745,7 @@ ENTRY(xen_failsafe_callback)
+@@ -1367,7 +1738,7 @@ ENTRY(xen_failsafe_callback)
SAVE_ALL
jmp error_exit
CFI_ENDPROC
@@ -16656,7 +16669,7 @@ index dcdd0ea..de0bb2d 100644
apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -1416,16 +1794,31 @@ ENTRY(paranoid_exit)
+@@ -1416,16 +1787,31 @@ ENTRY(paranoid_exit)
TRACE_IRQS_OFF_DEBUG
testl %ebx,%ebx /* swapgs needed? */
jnz paranoid_restore
@@ -16689,7 +16702,7 @@ index dcdd0ea..de0bb2d 100644
jmp irq_return
paranoid_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1454,7 +1847,7 @@ paranoid_schedule:
+@@ -1454,7 +1840,7 @@ paranoid_schedule:
TRACE_IRQS_OFF
jmp paranoid_userspace
CFI_ENDPROC
@@ -16698,7 +16711,7 @@ index dcdd0ea..de0bb2d 100644
/*
* Exception entry point. This expects an error code/orig_rax on the stack.
-@@ -1481,12 +1874,13 @@ ENTRY(error_entry)
+@@ -1481,12 +1867,13 @@ ENTRY(error_entry)
movq_cfi r14, R14+8
movq_cfi r15, R15+8
xorl %ebx,%ebx
@@ -16713,7 +16726,7 @@ index dcdd0ea..de0bb2d 100644
ret
/*
-@@ -1513,7 +1907,7 @@ bstep_iret:
+@@ -1513,7 +1900,7 @@ bstep_iret:
movq %rcx,RIP+8(%rsp)
jmp error_swapgs
CFI_ENDPROC
@@ -16722,7 +16735,7 @@ index dcdd0ea..de0bb2d 100644
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
-@@ -1533,7 +1927,7 @@ ENTRY(error_exit)
+@@ -1533,7 +1920,7 @@ ENTRY(error_exit)
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
@@ -16731,7 +16744,7 @@ index dcdd0ea..de0bb2d 100644
/*
* Test if a given stack is an NMI stack or not.
-@@ -1591,9 +1985,11 @@ ENTRY(nmi)
+@@ -1591,9 +1978,11 @@ ENTRY(nmi)
* If %cs was not the kernel segment, then the NMI triggered in user
* space, which means it is definitely not nested.
*/
@@ -16744,7 +16757,7 @@ index dcdd0ea..de0bb2d 100644
/*
* Check the special variable on the stack to see if NMIs are
* executing.
-@@ -1752,6 +2148,17 @@ end_repeat_nmi:
+@@ -1752,6 +2141,17 @@ end_repeat_nmi:
*/
movq %cr2, %r12
@@ -16762,7 +16775,7 @@ index dcdd0ea..de0bb2d 100644
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
movq $-1,%rsi
-@@ -1767,21 +2174,32 @@ end_repeat_nmi:
+@@ -1767,21 +2167,32 @@ end_repeat_nmi:
testl %ebx,%ebx /* swapgs needed? */
jnz nmi_restore
nmi_swapgs:
@@ -20427,6 +20440,20 @@ index 0595f13..b544fa3 100644
return 0;
out:
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index a10e460..58fc514 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -24,6 +24,9 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_cpuid_entry2 *best;
+
++ if (!static_cpu_has(X86_FEATURE_XSAVE))
++ return 0;
++
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ return best && (best->ecx & bit(X86_FEATURE_XSAVE));
+ }
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a3b57a2..ebbe732 100644
--- a/arch/x86/kvm/emulate.c
@@ -20626,7 +20653,7 @@ index b1eb202..254e292 100644
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 2966c84..207d0cb 100644
+index 2966c84..9ac0c3c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1379,8 +1379,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
@@ -20667,6 +20694,16 @@ index 2966c84..207d0cb 100644
{
int r;
struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
+@@ -5762,6 +5764,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ int pending_vec, max_bits, idx;
+ struct desc_ptr dt;
+
++ if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
++ return -EINVAL;
++
+ dt.size = sregs->idt.limit;
+ dt.address = sregs->idt.base;
+ kvm_x86_ops->set_idt(vcpu, &dt);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 642d880..cc9ebac 100644
--- a/arch/x86/lguest/boot.c
@@ -27411,10 +27448,10 @@ index c1461de..355f120 100644
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index 5141d80..6c9fb41 100644
+index dde1a3f..6b663e6 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
-@@ -1765,6 +1765,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
+@@ -1784,6 +1784,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
convert_pfn_mfn(init_level4_pgt);
convert_pfn_mfn(level3_ident_pgt);
convert_pfn_mfn(level3_kernel_pgt);
@@ -27424,7 +27461,7 @@ index 5141d80..6c9fb41 100644
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
-@@ -1783,7 +1786,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
+@@ -1802,7 +1805,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
@@ -27436,7 +27473,7 @@ index 5141d80..6c9fb41 100644
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-@@ -1967,6 +1974,7 @@ static void __init xen_post_allocator_init(void)
+@@ -1986,6 +1993,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
pv_mmu_ops.set_pgd = xen_set_pgd;
@@ -27444,7 +27481,7 @@ index 5141d80..6c9fb41 100644
#endif
/* This will work as long as patching hasn't happened yet
-@@ -2048,6 +2056,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+@@ -2067,6 +2075,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_pgd = xen_set_pgd_hyper,
@@ -27785,7 +27822,7 @@ index 9a87daa..fb17486 100644
goto error;
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
-index 671d4d6..5f24030 100644
+index 671d4d6..afec999 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
@@ -27806,6 +27843,28 @@ index 671d4d6..5f24030 100644
static void cryptd_queue_worker(struct work_struct *work);
+@@ -137,13 +137,18 @@ static void cryptd_queue_worker(struct work_struct *work)
+ struct crypto_async_request *req, *backlog;
+
+ cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+- /* Only handle one request at a time to avoid hogging crypto
+- * workqueue. preempt_disable/enable is used to prevent
+- * being preempted by cryptd_enqueue_request() */
++ /*
++ * Only handle one request at a time to avoid hogging crypto workqueue.
++ * preempt_disable/enable is used to prevent being preempted by
++ * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
++ * cryptd_enqueue_request() being accessed from software interrupts.
++ */
++ local_bh_disable();
+ preempt_disable();
+ backlog = crypto_get_backlog(&cpu_queue->queue);
+ req = crypto_dequeue_request(&cpu_queue->queue);
+ preempt_enable();
++ local_bh_enable();
+
+ if (!req)
+ return;
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index e6defd8..c26a225 100644
--- a/drivers/acpi/apei/cper.c
@@ -30368,7 +30427,7 @@ index 9238de4..a27c72a 100644
DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
-index 5062eec..4e3e51f 100644
+index 7aff5c7..bee6c27 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
@@ -30380,18 +30439,34 @@ index 5062eec..4e3e51f 100644
dev->sigdata.lock = NULL;
-@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct file *filp)
+@@ -134,7 +134,7 @@ int drm_open(struct inode *inode, struct file *filp)
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+- if (!dev->open_count++)
++ if (local_inc_return(&dev->open_count) == 1)
+ need_setup = 1;
+ mutex_lock(&dev->struct_mutex);
+ old_mapping = dev->dev_mapping;
+@@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
retcode = drm_open_helper(inode, filp, dev);
- if (!retcode) {
-- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
-- if (!dev->open_count++)
-+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
-+ if (local_inc_return(&dev->open_count) == 1)
- retcode = drm_setup(dev);
- }
- if (!retcode) {
-@@ -422,7 +422,7 @@ int drm_release(struct inode *inode, struct file *filp)
+ if (retcode)
+ goto err_undo;
+- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
+ if (need_setup) {
+ retcode = drm_setup(dev);
+ if (retcode)
+@@ -164,7 +164,7 @@ err_undo:
+ iput(container_of(dev->dev_mapping, struct inode, i_data));
+ dev->dev_mapping = old_mapping;
+ mutex_unlock(&dev->struct_mutex);
+- dev->open_count--;
++ local_dec(&dev->open_count);
+ return retcode;
+ }
+ EXPORT_SYMBOL(drm_open);
+@@ -438,7 +438,7 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&drm_global_mutex);
@@ -30400,7 +30475,7 @@ index 5062eec..4e3e51f 100644
if (dev->driver->preclose)
dev->driver->preclose(dev, file_priv);
-@@ -431,10 +431,10 @@ int drm_release(struct inode *inode, struct file *filp)
+@@ -447,10 +447,10 @@ int drm_release(struct inode *inode, struct file *filp)
* Begin inline drm_release
*/
@@ -30413,7 +30488,7 @@ index 5062eec..4e3e51f 100644
/* Release any auth tokens that might point to this file_priv,
(do that under the drm_global_mutex) */
-@@ -530,8 +530,8 @@ int drm_release(struct inode *inode, struct file *filp)
+@@ -546,8 +546,8 @@ int drm_release(struct inode *inode, struct file *filp)
* End inline drm_release
*/
@@ -30654,7 +30729,7 @@ index 359f6e8..ada68fd 100644
if (IS_GEN6(dev) || IS_GEN7(dev)) {
seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 914c0df..d47d380 100644
+index 0969a7c..b9ffa45 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1269,7 +1269,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
@@ -35325,18 +35400,6 @@ index 4a518a3..936b334 100644
#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
-diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
-index 53743f7..af8b414 100644
---- a/drivers/net/ethernet/nxp/lpc_eth.c
-+++ b/drivers/net/ethernet/nxp/lpc_eth.c
-@@ -1524,6 +1524,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
- pldat->dma_buff_base_p);
- free_irq(ndev->irq, ndev);
- iounmap(pldat->net_base);
-+ mdiobus_unregister(pldat->mii_bus);
- mdiobus_free(pldat->mii_bus);
- clk_disable(pldat->clk);
- clk_put(pldat->clk);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b47d5b3..273a516 100644
--- a/drivers/net/ethernet/realtek/r8169.c
@@ -37943,40 +38006,6 @@ index 0d4aa82..f7832d4 100644
extern void tmem_register_hostops(struct tmem_hostops *m);
/* core tmem accessor functions */
-diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
-index 9fc9a60..68d4c10 100644
---- a/drivers/target/target_core_device.c
-+++ b/drivers/target/target_core_device.c
-@@ -850,20 +850,20 @@ int se_dev_check_shutdown(struct se_device *dev)
-
- static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
- {
-- u32 tmp, aligned_max_sectors;
-+ u32 aligned_max_sectors;
-+ u32 alignment;
- /*
- * Limit max_sectors to a PAGE_SIZE aligned value for modern
- * transport_allocate_data_tasks() operation.
- */
-- tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
-- aligned_max_sectors = (tmp / block_size);
-- if (max_sectors != aligned_max_sectors) {
-- printk(KERN_INFO "Rounding down aligned max_sectors from %u"
-- " to %u\n", max_sectors, aligned_max_sectors);
-- return aligned_max_sectors;
-- }
-+ alignment = max(1ul, PAGE_SIZE / block_size);
-+ aligned_max_sectors = rounddown(max_sectors, alignment);
-
-- return max_sectors;
-+ if (max_sectors != aligned_max_sectors)
-+ pr_info("Rounding down aligned max_sectors from %u to %u\n",
-+ max_sectors, aligned_max_sectors);
-+
-+ return aligned_max_sectors;
- }
-
- void se_dev_set_default_attribs(
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7502660..f214d9f 100644
--- a/drivers/target/target_core_transport.c
@@ -42708,6 +42737,22 @@ index 88714ae..16c2e11 100644
static inline u32 get_pll_internal_frequency(u32 ref_freq,
+diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
+index c3b3f7f..abd47c7 100644
+--- a/drivers/virtio/virtio.c
++++ b/drivers/virtio/virtio.c
+@@ -225,8 +225,10 @@ EXPORT_SYMBOL_GPL(register_virtio_device);
+
+ void unregister_virtio_device(struct virtio_device *dev)
+ {
++ int index = dev->index; /* save for after device release */
++
+ device_unregister(&dev->dev);
+- ida_simple_remove(&virtio_index_ida, dev->index);
++ ida_simple_remove(&virtio_index_ida, index);
+ }
+ EXPORT_SYMBOL_GPL(unregister_virtio_device);
+
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 453db0c..604973e 100644
--- a/drivers/virtio/virtio_mmio.c
@@ -42989,7 +43034,7 @@ index d146e18..12d1bd1 100644
fd_offset + ex.a_text);
if (error != N_DATADDR(ex)) {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 0225fdd..08bda99 100644
+index 0225fdd..0c0d35d 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -32,6 +32,7 @@
@@ -43135,7 +43180,7 @@ index 0225fdd..08bda99 100644
error = -ENOMEM;
goto out_close;
}
-@@ -523,6 +544,311 @@ out:
+@@ -523,6 +544,315 @@ out:
return error;
}
@@ -43369,7 +43414,7 @@ index 0225fdd..08bda99 100644
+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
+
+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
-+ if (xattr_size <= 0)
++ if (xattr_size <= 0 || xattr_size > 5)
+ return ~0UL;
+
+ for (i = 0; i < xattr_size; i++)
@@ -43379,9 +43424,13 @@ index 0225fdd..08bda99 100644
+
+#define parse_flag(option1, option2, flag) \
+ case option1: \
++ if (pax_flags_hardmode & MF_PAX_##flag) \
++ return ~0UL; \
+ pax_flags_hardmode |= MF_PAX_##flag; \
+ break; \
+ case option2: \
++ if (pax_flags_softmode & MF_PAX_##flag) \
++ return ~0UL; \
+ pax_flags_softmode |= MF_PAX_##flag; \
+ break;
+
@@ -43447,7 +43496,7 @@ index 0225fdd..08bda99 100644
/*
* These are the functions used to load ELF style executables and shared
* libraries. There is no binary dependent code anywhere else.
-@@ -539,6 +865,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+@@ -539,6 +869,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
{
unsigned int random_variable = 0;
@@ -43459,7 +43508,7 @@ index 0225fdd..08bda99 100644
if ((current->flags & PF_RANDOMIZE) &&
!(current->personality & ADDR_NO_RANDOMIZE)) {
random_variable = get_random_int() & STACK_RND_MASK;
-@@ -557,7 +888,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -557,7 +892,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long load_addr = 0, load_bias = 0;
int load_addr_set = 0;
char * elf_interpreter = NULL;
@@ -43468,7 +43517,7 @@ index 0225fdd..08bda99 100644
struct elf_phdr *elf_ppnt, *elf_phdata;
unsigned long elf_bss, elf_brk;
int retval, i;
-@@ -567,11 +898,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -567,11 +902,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc __maybe_unused = 0;
int executable_stack = EXSTACK_DEFAULT;
@@ -43481,7 +43530,7 @@ index 0225fdd..08bda99 100644
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
-@@ -707,11 +1038,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -707,11 +1042,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto out_free_dentry;
/* OK, This is the point of no return */
@@ -43564,7 +43613,7 @@ index 0225fdd..08bda99 100644
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
-@@ -802,6 +1203,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -802,6 +1207,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
@@ -43585,7 +43634,7 @@ index 0225fdd..08bda99 100644
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -834,9 +1249,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -834,9 +1253,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
@@ -43598,7 +43647,7 @@ index 0225fdd..08bda99 100644
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
retval = -EINVAL;
-@@ -875,17 +1290,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -875,17 +1294,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto out_free_dentry;
}
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
@@ -43649,7 +43698,7 @@ index 0225fdd..08bda99 100644
load_bias);
if (!IS_ERR((void *)elf_entry)) {
/*
-@@ -1107,7 +1549,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
+@@ -1107,7 +1553,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
* Decide what to dump of a segment, part, all or none.
*/
static unsigned long vma_dump_size(struct vm_area_struct *vma,
@@ -43658,7 +43707,7 @@ index 0225fdd..08bda99 100644
{
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
-@@ -1144,7 +1586,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+@@ -1144,7 +1590,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
if (vma->vm_file == NULL)
return 0;
@@ -43667,7 +43716,7 @@ index 0225fdd..08bda99 100644
goto whole;
/*
-@@ -1366,9 +1808,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+@@ -1366,9 +1812,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
int i = 0;
@@ -43679,7 +43728,7 @@ index 0225fdd..08bda99 100644
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
-@@ -1879,14 +2321,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+@@ -1879,14 +2325,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
}
static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
@@ -43696,7 +43745,7 @@ index 0225fdd..08bda99 100644
return size;
}
-@@ -1980,7 +2422,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1980,7 +2426,7 @@ static int elf_core_dump(struct coredump_params *cprm)
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
@@ -43705,7 +43754,7 @@ index 0225fdd..08bda99 100644
offset += elf_core_extra_data_size();
e_shoff = offset;
-@@ -1994,10 +2436,12 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1994,10 +2440,12 @@ static int elf_core_dump(struct coredump_params *cprm)
offset = dataoff;
size += sizeof(*elf);
@@ -43718,7 +43767,7 @@ index 0225fdd..08bda99 100644
if (size > cprm->limit
|| !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
goto end_coredump;
-@@ -2011,7 +2455,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2011,7 +2459,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
@@ -43727,7 +43776,7 @@ index 0225fdd..08bda99 100644
phdr.p_memsz = vma->vm_end - vma->vm_start;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -2022,6 +2466,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2022,6 +2470,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_align = ELF_EXEC_PAGESIZE;
size += sizeof(phdr);
@@ -43735,7 +43784,7 @@ index 0225fdd..08bda99 100644
if (size > cprm->limit
|| !dump_write(cprm->file, &phdr, sizeof(phdr)))
goto end_coredump;
-@@ -2046,7 +2491,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2046,7 +2495,7 @@ static int elf_core_dump(struct coredump_params *cprm)
unsigned long addr;
unsigned long end;
@@ -43744,7 +43793,7 @@ index 0225fdd..08bda99 100644
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
-@@ -2055,6 +2500,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2055,6 +2504,7 @@ static int elf_core_dump(struct coredump_params *cprm)
page = get_dump_page(addr);
if (page) {
void *kaddr = kmap(page);
@@ -43752,7 +43801,7 @@ index 0225fdd..08bda99 100644
stop = ((size += PAGE_SIZE) > cprm->limit) ||
!dump_write(cprm->file, kaddr,
PAGE_SIZE);
-@@ -2072,6 +2518,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2072,6 +2522,7 @@ static int elf_core_dump(struct coredump_params *cprm)
if (e_phnum == PN_XNUM) {
size += sizeof(*shdr4extnum);
@@ -43760,7 +43809,7 @@ index 0225fdd..08bda99 100644
if (size > cprm->limit
|| !dump_write(cprm->file, shdr4extnum,
sizeof(*shdr4extnum)))
-@@ -2092,6 +2539,97 @@ out:
+@@ -2092,6 +2543,97 @@ out:
#endif /* CONFIG_ELF_CORE */
@@ -47589,6 +47638,54 @@ index 753af3d..f7b021a 100644
if (!IS_ERR(s))
kfree(s);
}
+diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
+index a3bde91..2524579 100644
+--- a/fs/gfs2/quota.c
++++ b/fs/gfs2/quota.c
+@@ -497,8 +497,11 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
+ struct gfs2_quota_data **qd;
+ int error;
+
+- if (ip->i_res == NULL)
+- gfs2_rs_alloc(ip);
++ if (ip->i_res == NULL) {
++ error = gfs2_rs_alloc(ip);
++ if (error)
++ return error;
++ }
+
+ qd = ip->i_res->rs_qa_qd;
+
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index c9ed814..4a45d5f 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -477,7 +477,6 @@ void gfs2_free_clones(struct gfs2_rgrpd *rgd)
+ */
+ int gfs2_rs_alloc(struct gfs2_inode *ip)
+ {
+- int error = 0;
+ struct gfs2_blkreserv *res;
+
+ if (ip->i_res)
+@@ -485,7 +484,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip)
+
+ res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
+ if (!res)
+- error = -ENOMEM;
++ return -ENOMEM;
+
+ down_write(&ip->i_rw_mutex);
+ if (ip->i_res)
+@@ -493,7 +492,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip)
+ else
+ ip->i_res = res;
+ up_write(&ip->i_rw_mutex);
+- return error;
++ return 0;
+ }
+
+ static void dump_rs(struct seq_file *seq, struct gfs2_blkreserv *rs)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8349a89..51a0254 100644
--- a/fs/hugetlbfs/inode.c
@@ -48448,6 +48545,18 @@ index a9269f1..5490437 100644
set_fs(oldfs);
if (host_err < 0)
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index f35794b..a506360 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -21,6 +21,7 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
+ return true;
++ break;
+ case (FSNOTIFY_EVENT_NONE):
+ return true;
+ default:
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index d438036..0ecadde 100644
--- a/fs/notify/fanotify/fanotify_user.c
@@ -50078,6 +50187,25 @@ index 1ccfa53..0848f95 100644
} else if (mm) {
pid_t tid = vm_is_stack(priv->task, vma, is_pid);
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index 29996e8..2d1e0f3 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -161,12 +161,13 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c)
+
+ while (s < e) {
+ unsigned long flags;
++ u64 id;
+
+ if (c > psinfo->bufsize)
+ c = psinfo->bufsize;
+ spin_lock_irqsave(&psinfo->buf_lock, flags);
+ memcpy(psinfo->buf, s, c);
+- psinfo->write(PSTORE_TYPE_CONSOLE, 0, NULL, 0, c, psinfo);
++ psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, c, psinfo);
+ spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ s += c;
+ c = e - s;
diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
index d67908b..d13f6a6 100644
--- a/fs/quota/netlink.c
@@ -65250,26 +65378,6 @@ index 9e5425b..8136ffc 100644
struct list_head list;
/* Protects from simultaneous access to first_req list */
spinlock_t info_list_lock;
-diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
-index f10553c..fb5204b 100644
---- a/include/net/cfg80211.h
-+++ b/include/net/cfg80211.h
-@@ -2633,6 +2633,15 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb);
- unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc);
-
- /**
-+ * ieee80211_get_mesh_hdrlen - get mesh extension header length
-+ * @meshhdr: the mesh extension header, only the flags field
-+ * (first byte) will be accessed
-+ * Returns the length of the extension header, which is always at
-+ * least 6 bytes and at most 18 if address 5 and 6 are present.
-+ */
-+unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
-+
-+/**
- * DOC: Data path helpers
- *
- * In addition to generic utilities, cfg80211 also offers
diff --git a/include/net/flow.h b/include/net/flow.h
index 628e11b..4c475df 100644
--- a/include/net/flow.h
@@ -67668,7 +67776,7 @@ index 2c8857e..288c9c7 100644
else
new_fs = fs;
diff --git a/kernel/futex.c b/kernel/futex.c
-index 3717e7b..473c750 100644
+index 20ef219..b3a0cb2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -54,6 +54,7 @@
@@ -67691,7 +67799,7 @@ index 3717e7b..473c750 100644
/*
* The futex address must be "naturally" aligned.
*/
-@@ -2714,6 +2720,7 @@ static int __init futex_init(void)
+@@ -2717,6 +2723,7 @@ static int __init futex_init(void)
{
u32 curval;
int i;
@@ -67699,7 +67807,7 @@ index 3717e7b..473c750 100644
/*
* This will fail and we want it. Some arch implementations do
-@@ -2725,8 +2732,11 @@ static int __init futex_init(void)
+@@ -2728,8 +2735,11 @@ static int __init futex_init(void)
* implementation, the non-functional ones will return
* -ENOSYS.
*/
@@ -68223,7 +68331,7 @@ index 91c32a0..7b88d63 100644
seq_printf(m, "%40s %14lu %29s %pS\n",
name, stats->contending_point[i],
diff --git a/kernel/module.c b/kernel/module.c
-index 9ad9ee9..f6e05c2 100644
+index 9ad9ee9..731c128 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -58,6 +58,7 @@
@@ -68512,7 +68620,7 @@ index 9ad9ee9..f6e05c2 100644
}
}
-@@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+@@ -2266,28 +2284,33 @@ static void layout_symtab(struct module *mod, struct load_info *info)
/* Put symbol section at end of init part of module. */
symsect->sh_flags |= SHF_ALLOC;
@@ -68521,8 +68629,23 @@ index 9ad9ee9..f6e05c2 100644
info->index.sym) | INIT_OFFSET_MASK;
pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
-@@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+ src = (void *)info->hdr + symsect->sh_offset;
+ nsrc = symsect->sh_size / sizeof(*src);
+
++ /* strtab always starts with a nul, so offset 0 is the empty string. */
++ strtab_size = 1;
++
+ /* Compute total space required for the core symbols' strtab. */
+- for (ndst = i = strtab_size = 1; i < nsrc; ++i, ++src)
+- if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) {
+- strtab_size += strlen(&info->strtab[src->st_name]) + 1;
++ for (ndst = i = 0; i < nsrc; i++) {
++ if (i == 0 ||
++ is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
++ strtab_size += strlen(&info->strtab[src[i].st_name])+1;
+ ndst++;
}
++ }
/* Append room for core symbols at end of core part. */
- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
@@ -68539,7 +68662,7 @@ index 9ad9ee9..f6e05c2 100644
info->index.str) | INIT_OFFSET_MASK;
pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
}
-@@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+@@ -2305,24 +2328,28 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
/* Make sure we get permanent strtab: don't use info->strtab. */
mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
@@ -68554,10 +68677,23 @@ index 9ad9ee9..f6e05c2 100644
+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
src = mod->symtab;
- *dst = *src;
+- *dst = *src;
*s++ = 0;
-@@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
- s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
+- for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
+- if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum))
+- continue;
+-
+- dst[ndst] = *src;
+- dst[ndst++].st_name = s - mod->core_strtab;
+- s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
++ for (ndst = i = 0; i < mod->num_symtab; i++) {
++ if (i == 0 ||
++ is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
++ dst[ndst] = src[i];
++ dst[ndst++].st_name = s - mod->core_strtab;
++ s += strlcpy(s, &mod->strtab[src[i].st_name],
++ KSYM_NAME_LEN) + 1;
++ }
}
mod->core_num_syms = ndst;
+
@@ -68565,7 +68701,7 @@ index 9ad9ee9..f6e05c2 100644
}
#else
static inline void layout_symtab(struct module *mod, struct load_info *info)
-@@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
+@@ -2356,17 +2383,33 @@ void * __weak module_alloc(unsigned long size)
return size == 0 ? NULL : vmalloc_exec(size);
}
@@ -68604,7 +68740,7 @@ index 9ad9ee9..f6e05c2 100644
mutex_unlock(&module_mutex);
}
return ret;
-@@ -2544,8 +2582,14 @@ static struct module *setup_load_info(struct load_info *info)
+@@ -2544,8 +2587,14 @@ static struct module *setup_load_info(struct load_info *info)
static int check_modinfo(struct module *mod, struct load_info *info)
{
const char *modmagic = get_modinfo(info, "vermagic");
@@ -68619,7 +68755,7 @@ index 9ad9ee9..f6e05c2 100644
/* This is allowed: modprobe --force will invalidate it. */
if (!modmagic) {
err = try_to_force_load(mod, "bad vermagic");
-@@ -2568,7 +2612,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
+@@ -2568,7 +2617,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
}
/* Set up license info based on the info section */
@@ -68628,7 +68764,7 @@ index 9ad9ee9..f6e05c2 100644
return 0;
}
-@@ -2662,7 +2706,7 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -2662,7 +2711,7 @@ static int move_module(struct module *mod, struct load_info *info)
void *ptr;
/* Do the allocs. */
@@ -68637,7 +68773,7 @@ index 9ad9ee9..f6e05c2 100644
/*
* The pointer to this block is stored in the module structure
* which is inside the block. Just mark it as not being a
-@@ -2672,23 +2716,50 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -2672,23 +2721,50 @@ static int move_module(struct module *mod, struct load_info *info)
if (!ptr)
return -ENOMEM;
@@ -68696,7 +68832,7 @@ index 9ad9ee9..f6e05c2 100644
/* Transfer each section which specifies SHF_ALLOC */
pr_debug("final section addresses:\n");
-@@ -2699,16 +2770,45 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -2699,16 +2775,45 @@ static int move_module(struct module *mod, struct load_info *info)
if (!(shdr->sh_flags & SHF_ALLOC))
continue;
@@ -68749,7 +68885,7 @@ index 9ad9ee9..f6e05c2 100644
pr_debug("\t0x%lx %s\n",
(long)shdr->sh_addr, info->secstrings + shdr->sh_name);
}
-@@ -2763,12 +2863,12 @@ static void flush_module_icache(const struct module *mod)
+@@ -2763,12 +2868,12 @@ static void flush_module_icache(const struct module *mod)
* Do it before processing of module parameters, so the module
* can provide parameter accessor functions of its own.
*/
@@ -68768,7 +68904,7 @@ index 9ad9ee9..f6e05c2 100644
set_fs(old_fs);
}
-@@ -2838,8 +2938,10 @@ out:
+@@ -2838,8 +2943,10 @@ out:
static void module_deallocate(struct module *mod, struct load_info *info)
{
percpu_modfree(mod);
@@ -68781,7 +68917,7 @@ index 9ad9ee9..f6e05c2 100644
}
int __weak module_finalize(const Elf_Ehdr *hdr,
-@@ -2852,7 +2954,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
+@@ -2852,7 +2959,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
static int post_relocation(struct module *mod, const struct load_info *info)
{
/* Sort exception table now relocations are done. */
@@ -68791,7 +68927,7 @@ index 9ad9ee9..f6e05c2 100644
/* Copy relocated percpu area over. */
percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
-@@ -2903,9 +3007,38 @@ static struct module *load_module(void __user *umod,
+@@ -2903,9 +3012,38 @@ static struct module *load_module(void __user *umod,
if (err)
goto free_unload;
@@ -68830,7 +68966,7 @@ index 9ad9ee9..f6e05c2 100644
/* Fix up syms, so that st_value is a pointer to location. */
err = simplify_symbols(mod, &info);
if (err < 0)
-@@ -2921,13 +3054,6 @@ static struct module *load_module(void __user *umod,
+@@ -2921,13 +3059,6 @@ static struct module *load_module(void __user *umod,
flush_module_icache(mod);
@@ -68844,7 +68980,7 @@ index 9ad9ee9..f6e05c2 100644
/* Mark state as coming so strong_try_module_get() ignores us. */
mod->state = MODULE_STATE_COMING;
-@@ -2985,11 +3111,10 @@ static struct module *load_module(void __user *umod,
+@@ -2985,11 +3116,10 @@ static struct module *load_module(void __user *umod,
unlock:
mutex_unlock(&module_mutex);
synchronize_sched();
@@ -68857,7 +68993,7 @@ index 9ad9ee9..f6e05c2 100644
free_unload:
module_unload_free(mod);
free_module:
-@@ -3030,16 +3155,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
+@@ -3030,16 +3160,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
MODULE_STATE_COMING, mod);
/* Set RO and NX regions for core */
@@ -68882,7 +69018,7 @@ index 9ad9ee9..f6e05c2 100644
do_mod_ctors(mod);
/* Start the module */
-@@ -3085,11 +3210,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
+@@ -3085,11 +3215,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
mod->strtab = mod->core_strtab;
#endif
unset_module_init_ro_nx(mod);
@@ -68900,7 +69036,7 @@ index 9ad9ee9..f6e05c2 100644
mutex_unlock(&module_mutex);
return 0;
-@@ -3120,10 +3246,16 @@ static const char *get_ksymbol(struct module *mod,
+@@ -3120,10 +3251,16 @@ static const char *get_ksymbol(struct module *mod,
unsigned long nextval;
/* At worse, next value is at end of module */
@@ -68920,7 +69056,7 @@ index 9ad9ee9..f6e05c2 100644
/* Scan for closest preceding symbol, and next symbol. (ELF
starts real symbols at 1). */
-@@ -3358,7 +3490,7 @@ static int m_show(struct seq_file *m, void *p)
+@@ -3358,7 +3495,7 @@ static int m_show(struct seq_file *m, void *p)
char buf[8];
seq_printf(m, "%s %u",
@@ -68929,7 +69065,7 @@ index 9ad9ee9..f6e05c2 100644
print_unload_info(m, mod);
/* Informative for users. */
-@@ -3367,7 +3499,7 @@ static int m_show(struct seq_file *m, void *p)
+@@ -3367,7 +3504,7 @@ static int m_show(struct seq_file *m, void *p)
mod->state == MODULE_STATE_COMING ? "Loading":
"Live");
/* Used by oprofile and other similar tools. */
@@ -68938,7 +69074,7 @@ index 9ad9ee9..f6e05c2 100644
/* Taints info */
if (mod->taints)
-@@ -3403,7 +3535,17 @@ static const struct file_operations proc_modules_operations = {
+@@ -3403,7 +3540,17 @@ static const struct file_operations proc_modules_operations = {
static int __init proc_modules_init(void)
{
@@ -68956,7 +69092,7 @@ index 9ad9ee9..f6e05c2 100644
return 0;
}
module_init(proc_modules_init);
-@@ -3462,12 +3604,12 @@ struct module *__module_address(unsigned long addr)
+@@ -3462,12 +3609,12 @@ struct module *__module_address(unsigned long addr)
{
struct module *mod;
@@ -68972,7 +69108,7 @@ index 9ad9ee9..f6e05c2 100644
return mod;
return NULL;
}
-@@ -3501,11 +3643,20 @@ bool is_module_text_address(unsigned long addr)
+@@ -3501,11 +3648,20 @@ bool is_module_text_address(unsigned long addr)
*/
struct module *__module_text_address(unsigned long addr)
{
@@ -71437,6 +71573,55 @@ index bd2bea9..6b3c95e 100644
if (atomic_read(&task->signal->live) != 1)
return false;
+diff --git a/lib/list_debug.c b/lib/list_debug.c
+index c24c2f7..bef49ee 100644
+--- a/lib/list_debug.c
++++ b/lib/list_debug.c
+@@ -23,17 +23,19 @@ void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+ {
+- WARN(next->prev != prev,
++ if (WARN(next->prev != prev,
+ "list_add corruption. next->prev should be "
+ "prev (%p), but was %p. (next=%p).\n",
+- prev, next->prev, next);
+- WARN(prev->next != next,
++ prev, next->prev, next) ||
++ WARN(prev->next != next,
+ "list_add corruption. prev->next should be "
+ "next (%p), but was %p. (prev=%p).\n",
+- next, prev->next, prev);
+- WARN(new == prev || new == next,
++ next, prev->next, prev) ||
++ WARN(new == prev || new == next,
+ "list_add double add: new=%p, prev=%p, next=%p.\n",
+- new, prev, next);
++ new, prev, next))
++ return;
++
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+@@ -86,12 +88,14 @@ EXPORT_SYMBOL(list_del);
+ void __list_add_rcu(struct list_head *new,
+ struct list_head *prev, struct list_head *next)
+ {
+- WARN(next->prev != prev,
++ if (WARN(next->prev != prev,
+ "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
+- prev, next->prev, next);
+- WARN(prev->next != next,
++ prev, next->prev, next) ||
++ WARN(prev->next != next,
+ "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
+- next, prev->next, prev);
++ next, prev->next, prev))
++ return;
++
+ new->next = next;
+ new->prev = prev;
+ rcu_assign_pointer(list_next_rcu(prev), new);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e796429..6e38f9f 100644
--- a/lib/radix-tree.c
@@ -74702,7 +74887,7 @@ index aa95e59..b681a63 100644
struct anon_vma_chain *avc;
struct anon_vma *anon_vma;
diff --git a/mm/shmem.c b/mm/shmem.c
-index d2eeca1..3f160be 100644
+index d2eeca1..92f3123 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -31,7 +31,7 @@
@@ -74723,7 +74908,35 @@ index d2eeca1..3f160be 100644
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
-@@ -2594,8 +2594,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -2207,6 +2207,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ static int shmem_xattr_validate(const char *name)
+ {
+ struct { const char *prefix; size_t len; } arr[] = {
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
++#endif
++
+ { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
+ { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
+ };
+@@ -2260,6 +2265,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+ if (err)
+ return err;
+
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
++ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
++ return -EOPNOTSUPP;
++ if (size > 8)
++ return -EINVAL;
++ }
++#endif
++
+ if (size == 0)
+ value = ""; /* empty EA, do not remove */
+
+@@ -2594,8 +2608,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
@@ -75718,7 +75931,7 @@ index 8c7265a..c96d884 100644
mm->unmap_area = arch_unmap_area;
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index 2bb90b1..ed47e53 100644
+index 2bb90b1..3795e47 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -75743,15 +75956,7 @@ index 2bb90b1..ed47e53 100644
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr)
- {
- pte_t *pte;
-+ int ret = -ENOMEM;
-
- /*
- * nr is a running index into the array which helps higher level
-@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
pte = pte_alloc_kernel(pmd, addr);
if (!pte)
return -ENOMEM;
@@ -75761,33 +75966,29 @@ index 2bb90b1..ed47e53 100644
struct page *page = pages[*nr];
- if (WARN_ON(!pte_none(*pte)))
-- return -EBUSY;
-- if (WARN_ON(!page))
-- return -ENOMEM;
+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+ if (pgprot_val(prot) & _PAGE_NX)
+#endif
+
-+ if (WARN_ON(!pte_none(*pte))) {
-+ ret = -EBUSY;
-+ goto out;
++ if (!pte_none(*pte)) {
++ pax_close_kernel();
++ WARN_ON(1);
+ return -EBUSY;
+- if (WARN_ON(!page))
+ }
-+ if (WARN_ON(!page)) {
-+ ret = -ENOMEM;
-+ goto out;
++ if (!page) {
++ pax_close_kernel();
++ WARN_ON(1);
+ return -ENOMEM;
+ }
set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
(*nr)++;
} while (pte++, addr += PAGE_SIZE, addr != end);
-- return 0;
-+ ret = 0;
-+out:
+ pax_close_kernel();
-+ return ret;
+ return 0;
}
- static int vmap_pmd_range(pud_t *pud, unsigned long addr,
-@@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
pmd_t *pmd;
unsigned long next;
@@ -75796,7 +75997,7 @@ index 2bb90b1..ed47e53 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
+@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
pud_t *pud;
unsigned long next;
@@ -75805,7 +76006,7 @@ index 2bb90b1..ed47e53 100644
if (!pud)
return -ENOMEM;
do {
-@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
+@@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
* and fall back on vmalloc() if that fails. Others
* just put it in the vmalloc space.
*/
@@ -75827,7 +76028,7 @@ index 2bb90b1..ed47e53 100644
return is_vmalloc_addr(x);
}
-@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
+@@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
if (!pgd_none(*pgd)) {
pud_t *pud = pud_offset(pgd, addr);
@@ -75842,7 +76043,7 @@ index 2bb90b1..ed47e53 100644
if (!pmd_none(*pmd)) {
pte_t *ptep, pte;
-@@ -329,7 +369,7 @@ static void purge_vmap_area_lazy(void);
+@@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
* Allocate a region of KVA of the specified size and alignment, within the
* vstart and vend.
*/
@@ -75851,7 +76052,7 @@ index 2bb90b1..ed47e53 100644
unsigned long align,
unsigned long vstart, unsigned long vend,
int node, gfp_t gfp_mask)
-@@ -1328,6 +1368,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+@@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
struct vm_struct *area;
BUG_ON(in_interrupt());
@@ -75868,7 +76069,7 @@ index 2bb90b1..ed47e53 100644
if (flags & VM_IOREMAP) {
int bit = fls(size);
-@@ -1568,6 +1618,11 @@ void *vmap(struct page **pages, unsigned int count,
+@@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
if (count > totalram_pages)
return NULL;
@@ -75880,7 +76081,7 @@ index 2bb90b1..ed47e53 100644
area = get_vm_area_caller((count << PAGE_SHIFT), flags,
__builtin_return_address(0));
if (!area)
-@@ -1669,6 +1724,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+@@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
goto fail;
@@ -75894,7 +76095,7 @@ index 2bb90b1..ed47e53 100644
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
start, end, node, gfp_mask, caller);
if (!area)
-@@ -1842,10 +1904,9 @@ EXPORT_SYMBOL(vzalloc_node);
+@@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/
@@ -75906,7 +76107,7 @@ index 2bb90b1..ed47e53 100644
-1, __builtin_return_address(0));
}
-@@ -2136,6 +2197,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+@@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;
@@ -75915,7 +76116,7 @@ index 2bb90b1..ed47e53 100644
if ((PAGE_SIZE-1) & (unsigned long)addr)
return -EINVAL;
-@@ -2572,7 +2635,7 @@ static int s_show(struct seq_file *m, void *p)
+@@ -2572,7 +2634,7 @@ static int s_show(struct seq_file *m, void *p)
{
struct vm_struct *v = p;
@@ -75924,6 +76125,19 @@ index 2bb90b1..ed47e53 100644
v->addr, v->addr + v->size, v->size);
if (v->caller)
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 99b434b..a018dfc 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2953,6 +2953,8 @@ static int kswapd(void *p)
+ &balanced_classzone_idx);
+ }
+ }
++
++ current->reclaim_state = NULL;
+ return 0;
+ }
+
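/*
 * A minimal sketch (hypothetical names, not kernel code) of the pattern the
 * mm/vmscan.c hunk above applies: kswapd publishes a pointer to state that
 * lives in its own stack frame via current->reclaim_state, so it clears that
 * pointer again before the function returns and the frame goes away.
 */
#include <stddef.h>

struct task_ctx {
	int *scratch;			/* may point into a worker's stack frame */
};

static void worker(struct task_ctx *ctx)
{
	int local_counter = 0;

	ctx->scratch = &local_counter;	/* publish stack storage */
	/* ... work that updates *ctx->scratch ... */
	ctx->scratch = NULL;		/* unpublish before returning */
}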
diff --git a/mm/vmstat.c b/mm/vmstat.c
index df7a674..8b4a4f3 100644
--- a/mm/vmstat.c
@@ -76609,7 +76823,7 @@ index 0337e2b..47914a0 100644
return err;
diff --git a/net/core/dev.c b/net/core/dev.c
-index 2fb9f59..d9a07df 100644
+index aed87a4..72cc526 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1138,9 +1138,13 @@ void dev_load(struct net *net, const char *name)
@@ -77303,10 +77517,10 @@ index d23c657..cb69cc2 100644
static int raw_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index 2a1383c..ff99572 100644
+index c017cb1..9eb15b7d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
-@@ -2523,7 +2523,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
+@@ -2526,7 +2526,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
static __net_init int rt_genid_init(struct net *net)
{
@@ -77315,42 +77529,11 @@ index 2a1383c..ff99572 100644
get_random_bytes(&net->ipv4.dev_addr_genid,
sizeof(net->ipv4.dev_addr_genid));
return 0;
-diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
-index 813b43a..834857f 100644
---- a/net/ipv4/tcp_illinois.c
-+++ b/net/ipv4/tcp_illinois.c
-@@ -313,11 +313,13 @@ static void tcp_illinois_info(struct sock *sk, u32 ext,
- .tcpv_rttcnt = ca->cnt_rtt,
- .tcpv_minrtt = ca->base_rtt,
- };
-- u64 t = ca->sum_rtt;
-
-- do_div(t, ca->cnt_rtt);
-- info.tcpv_rtt = t;
-+ if (info.tcpv_rttcnt > 0) {
-+ u64 t = ca->sum_rtt;
-
-+ do_div(t, info.tcpv_rttcnt);
-+ info.tcpv_rtt = t;
-+ }
- nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
- }
- }
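/*
 * The tcp_illinois.c hunk dropped from the patch above (presumably redundant
 * now that the 3.6.7 base carries the fix) guards the RTT average so the
 * division never runs with a zero sample count.  The same guard in isolation,
 * with hypothetical names:
 */
#include <stdint.h>

struct rtt_stats {
	uint64_t sum_rtt;	/* accumulated RTT samples */
	uint32_t cnt_rtt;	/* number of samples taken */
};

static uint64_t mean_rtt(const struct rtt_stats *s)
{
	if (s->cnt_rtt == 0)	/* no samples yet: avoid divide by zero */
		return 0;
	return s->sum_rtt / s->cnt_rtt;
}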
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index d377f48..f19e3ec 100644
+index c92c4da..f19e3ec 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
-@@ -4556,6 +4556,9 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
- struct tcphdr *th;
- bool fragstolen;
-
-+ if (size == 0)
-+ return 0;
-+
- skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
- if (!skb)
- goto err;
-@@ -4728,7 +4731,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
+@@ -4731,7 +4731,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
* simplifies code)
*/
static void
@@ -78148,31 +78331,6 @@ index 34e4185..8823368 100644
} while (!res);
return res;
}
-diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
-index 3bfb34a..69bf48d 100644
---- a/net/l2tp/l2tp_eth.c
-+++ b/net/l2tp/l2tp_eth.c
-@@ -290,6 +290,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
-
- out_del_dev:
- free_netdev(dev);
-+ spriv->dev = NULL;
- out_del_session:
- l2tp_session_delete(session);
- out:
-diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
-index 5746d62..327aa07 100644
---- a/net/mac80211/ibss.c
-+++ b/net/mac80211/ibss.c
-@@ -1074,7 +1074,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
- sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
- sdata->u.ibss.ibss_join_req = jiffies;
-
-- memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN);
-+ memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len);
- sdata->u.ibss.ssid_len = params->ssid_len;
-
- mutex_unlock(&sdata->u.ibss.mtx);
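/*
 * The mac80211 ibss.c hunk dropped above changed the SSID copy to use the
 * caller-supplied length rather than the fixed IEEE80211_MAX_SSID_LEN, so the
 * memcpy never reads past the source buffer.  A sketch of that bounded-copy
 * pattern, with hypothetical names:
 */
#include <string.h>

#define MAX_SSID_LEN 32

struct ibss_cfg {
	unsigned char ssid[MAX_SSID_LEN];
	size_t ssid_len;
};

static void set_ssid(struct ibss_cfg *cfg, const unsigned char *ssid, size_t len)
{
	if (len > MAX_SSID_LEN)		/* clamp to the destination size */
		len = MAX_SSID_LEN;
	memcpy(cfg->ssid, ssid, len);	/* copy only what was provided */
	cfg->ssid_len = len;
}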
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index bb61f77..3788d63 100644
--- a/net/mac80211/ieee80211_i.h
@@ -78331,111 +78489,11 @@ index c97a065..ff61928 100644
return -EFAULT;
return p;
-diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
-index 0cb4ede..884155d 100644
---- a/net/mac80211/rx.c
-+++ b/net/mac80211/rx.c
-@@ -491,6 +491,11 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
-
- if (ieee80211_is_action(hdr->frame_control)) {
- u8 category;
-+
-+ /* make sure category field is present */
-+ if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
-+ return RX_DROP_MONITOR;
-+
- mgmt = (struct ieee80211_mgmt *)hdr;
- category = mgmt->u.action.category;
- if (category != WLAN_CATEGORY_MESH_ACTION &&
-@@ -1426,7 +1431,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
- frag = sc & IEEE80211_SCTL_FRAG;
-
- if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
-- (rx->skb)->len < 24 ||
- is_multicast_ether_addr(hdr->addr1))) {
- /* not fragmented */
- goto out;
-@@ -1849,6 +1853,20 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
-
- hdr = (struct ieee80211_hdr *) skb->data;
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
-+
-+ /* make sure fixed part of mesh header is there, also checks skb len */
-+ if (!pskb_may_pull(rx->skb, hdrlen + 6))
-+ return RX_DROP_MONITOR;
-+
-+ mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
-+
-+ /* make sure full mesh header is there, also checks skb len */
-+ if (!pskb_may_pull(rx->skb,
-+ hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
-+ return RX_DROP_MONITOR;
-+
-+ /* reload pointers */
-+ hdr = (struct ieee80211_hdr *) skb->data;
- mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
-
- /* frame is in RMC, don't forward */
-@@ -1871,9 +1889,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
- if (is_multicast_ether_addr(hdr->addr1)) {
- mpp_addr = hdr->addr3;
- proxied_addr = mesh_hdr->eaddr1;
-- } else {
-+ } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
-+ /* has_a4 already checked in ieee80211_rx_mesh_check */
- mpp_addr = hdr->addr4;
- proxied_addr = mesh_hdr->eaddr2;
-+ } else {
-+ return RX_DROP_MONITOR;
- }
-
- rcu_read_lock();
-@@ -2313,6 +2334,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
- }
- break;
- case WLAN_CATEGORY_SELF_PROTECTED:
-+ if (len < (IEEE80211_MIN_ACTION_SIZE +
-+ sizeof(mgmt->u.action.u.self_prot.action_code)))
-+ break;
-+
- switch (mgmt->u.action.u.self_prot.action_code) {
- case WLAN_SP_MESH_PEERING_OPEN:
- case WLAN_SP_MESH_PEERING_CLOSE:
-@@ -2331,6 +2356,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
- }
- break;
- case WLAN_CATEGORY_MESH_ACTION:
-+ if (len < (IEEE80211_MIN_ACTION_SIZE +
-+ sizeof(mgmt->u.action.u.mesh_action.action_code)))
-+ break;
-+
- if (!ieee80211_vif_is_mesh(&sdata->vif))
- break;
- if (mesh_action_is_path_sel(mgmt) &&
-@@ -2865,10 +2894,15 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
- if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
- local->dot11ReceivedFragmentCount++;
-
-- if (ieee80211_is_mgmt(fc))
-- err = skb_linearize(skb);
-- else
-+ if (ieee80211_is_mgmt(fc)) {
-+ /* drop frame if too short for header */
-+ if (skb->len < ieee80211_hdrlen(fc))
-+ err = -ENOBUFS;
-+ else
-+ err = skb_linearize(skb);
-+ } else {
- err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
-+ }
-
- if (err) {
- dev_kfree_skb(skb);
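/*
 * The mac80211 rx.c hunks dropped above all follow one rule: verify that the
 * skb is at least as long as the header about to be parsed (pskb_may_pull,
 * minimum action-frame length) before dereferencing it.  A minimal user-space
 * analogue of that length-check-before-parse pattern, with hypothetical names:
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct mesh_hdr {
	uint8_t flags;
	uint8_t ttl;
	uint32_t seqnum;
};

static bool parse_mesh_hdr(const uint8_t *buf, size_t len, struct mesh_hdr *out)
{
	if (len < sizeof(*out))		/* reject truncated frames up front */
		return false;
	memcpy(out, buf, sizeof(*out));	/* safe: length verified above */
	return true;
}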
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
-index c9b52f7..4da1014 100644
+index 1cfe6d5..c428ba3 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
-@@ -1251,7 +1251,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+@@ -1279,7 +1279,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
#endif
/* everything else happens only if HW was up & running */
@@ -78773,10 +78831,10 @@ index 4fe4fb4..87a89e5 100644
return 0;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index 9172179..a4035c4 100644
+index 0426b67..d6ddaca 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
-@@ -769,7 +769,7 @@ static void netlink_overrun(struct sock *sk)
+@@ -780,7 +780,7 @@ static void netlink_overrun(struct sock *sk)
sk->sk_error_report(sk);
}
}
@@ -78785,7 +78843,7 @@ index 9172179..a4035c4 100644
}
static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
-@@ -2059,7 +2059,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
+@@ -2070,7 +2070,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(s),
nlk->cb,
atomic_read(&s->sk_refcnt),
@@ -80023,27 +80081,6 @@ index bc7430b..35349de 100644
struct rfkill *rfkill;
struct work_struct rfkill_sync;
-diff --git a/net/wireless/util.c b/net/wireless/util.c
-index 994e2f0..f67aeb1 100644
---- a/net/wireless/util.c
-+++ b/net/wireless/util.c
-@@ -309,7 +309,7 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
- }
- EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
-
--static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
-+unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
- {
- int ae = meshhdr->flags & MESH_FLAGS_AE;
- /* 7.1.3.5a.2 */
-@@ -326,6 +326,7 @@ static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
- return 6;
- }
- }
-+EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
-
- int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
- enum nl80211_iftype iftype)
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index b0eb7aa..7d73e82 100644
--- a/net/wireless/wext-core.c
@@ -81829,7 +81866,7 @@ index ffd2025..df062c9 100644
/* PCM3052 register definitions */
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
-index 08fde00..0bf641a 100644
+index 4c1cc51..16040040 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
@@ -81917,10 +81954,10 @@ index 91cdf94..4085161 100644
if (err < 0)
return err;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
-index 53b5ada..2db94c8 100644
+index bf3bf43..3826cbc 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
-@@ -2780,11 +2780,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
+@@ -2803,11 +2803,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
switch (substream->stream) {
case SNDRV_PCM_STREAM_PLAYBACK:
result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
@@ -87665,10 +87702,10 @@ index 0000000..9332f17
+_003896_hash nfs_parse_server_name 2 1899 _003896_hash NULL
diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
new file mode 100644
-index 0000000..244559e
+index 0000000..1aa0dce
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,1879 @@
+@@ -0,0 +1,1865 @@
+/*
+ * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -87746,10 +87783,10 @@ index 0000000..244559e
+static unsigned int handle_function(void);
+static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
+static tree get_size_overflow_type(gimple stmt, const_tree node);
-+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3);
++static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20120930beta",
++ .version = "20121113beta",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
@@ -88068,6 +88105,7 @@ index 0000000..244559e
+ tree type = TREE_TYPE(rhs1);
+ tree lhs = create_new_var(type);
+
++ gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
+ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
+ gimple_set_lhs(assign, make_ssa_name(lhs, assign));
+
@@ -88133,42 +88171,41 @@ index 0000000..244559e
+ return assign;
+}
+
-+static tree cast_to_new_size_overflow_type(gimple stmt, tree new_rhs1, tree size_overflow_type, bool before)
++static tree cast_to_new_size_overflow_type(gimple stmt, tree rhs, tree size_overflow_type, bool before)
+{
-+ const_gimple assign;
++ gimple assign;
+ gimple_stmt_iterator gsi;
+
-+ if (new_rhs1 == NULL_TREE)
++ if (rhs == NULL_TREE)
+ return NULL_TREE;
+
-+ if (!useless_type_conversion_p(TREE_TYPE(new_rhs1), size_overflow_type)) {
-+ gsi = gsi_for_stmt(stmt);
-+ assign = build_cast_stmt(size_overflow_type, new_rhs1, CREATE_NEW_VAR, &gsi, before);
-+ return gimple_get_lhs(assign);
-+ }
-+ return new_rhs1;
++ if (types_compatible_p(TREE_TYPE(rhs), size_overflow_type) && gimple_plf(stmt, MY_STMT))
++ return rhs;
++
++ gsi = gsi_for_stmt(stmt);
++ assign = build_cast_stmt(size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before);
++ gimple_set_plf(assign, MY_STMT, true);
++ return gimple_get_lhs(assign);
+}
+
-+static tree follow_overflow_type_and_dup(struct pointer_set_t *visited, gimple stmt, const_tree node, tree new_rhs1, tree new_rhs2, tree new_rhs3)
++static tree cast_to_TI_type(gimple stmt, tree node)
+{
-+ tree size_overflow_type = get_size_overflow_type(stmt, node);
-+
-+ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
-+
-+ if (new_rhs2 != NULL_TREE)
-+ new_rhs2 = cast_to_new_size_overflow_type(stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
++ gimple_stmt_iterator gsi;
++ gimple cast_stmt;
++ tree type = TREE_TYPE(node);
+
-+ if (new_rhs3 != NULL_TREE)
-+ new_rhs3 = cast_to_new_size_overflow_type(stmt, new_rhs3, size_overflow_type, BEFORE_STMT);
++ if (types_compatible_p(type, intTI_type_node))
++ return node;
+
-+ return dup_assign(visited, stmt, size_overflow_type, new_rhs1, new_rhs2, new_rhs3);
++ gsi = gsi_for_stmt(stmt);
++ cast_stmt = build_cast_stmt(intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
++ gimple_set_plf(cast_stmt, MY_STMT, true);
++ return gimple_get_lhs(cast_stmt);
+}
+
-+
+static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
+{
-+ tree size_overflow_type, lhs;
-+ gimple stmt;
++ tree lhs;
+ gimple_stmt_iterator gsi;
+
+ if (rhs1 == NULL_TREE) {
@@ -88206,18 +88243,14 @@ index 0000000..244559e
+ oldstmt = gsi_stmt(gsi);
+ }
+
-+ size_overflow_type = get_size_overflow_type(oldstmt, lhs);
-+
-+ stmt = build_cast_stmt(size_overflow_type, rhs1, CREATE_NEW_VAR, &gsi, before);
-+ gimple_set_plf(stmt, MY_STMT, true);
-+ return gimple_get_lhs(stmt);
++ return cast_to_new_size_overflow_type(oldstmt, rhs1, get_size_overflow_type(oldstmt, lhs), before);
+}
+
-+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3)
++static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
+{
+ gimple stmt;
+ gimple_stmt_iterator gsi;
-+ tree new_var, lhs = gimple_get_lhs(oldstmt);
++ tree size_overflow_type, new_var, lhs = gimple_get_lhs(oldstmt);
+
+ if (gimple_plf(oldstmt, MY_STMT))
+ return lhs;
@@ -88238,6 +88271,8 @@ index 0000000..244559e
+ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
+ gimple_assign_set_rhs_code(stmt, MULT_EXPR);
+
++ size_overflow_type = get_size_overflow_type(oldstmt, node);
++
+ if (is_bool(lhs))
+ new_var = SSA_NAME_VAR(lhs);
+ else
@@ -88246,7 +88281,7 @@ index 0000000..244559e
+ gimple_set_lhs(stmt, new_var);
+
+ if (rhs1 != NULL_TREE) {
-+ if (!gimple_assign_cast_p(oldstmt))
++ if (!gimple_assign_cast_p(oldstmt) && TREE_CODE_CLASS(gimple_assign_rhs_code(oldstmt)) != tcc_comparison)
+ rhs1 = cast_a_tree(size_overflow_type, rhs1);
+ gimple_assign_set_rhs1(stmt, rhs1);
+ }
@@ -88498,7 +88533,7 @@ index 0000000..244559e
+ return lhs;
+
+ if (gimple_plf(stmt, NO_CAST_CHECK))
-+ return follow_overflow_type_and_dup(visited, stmt, rhs1, new_rhs1, NULL_TREE, NULL_TREE);
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
+
+ if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
+ size_overflow_type = get_size_overflow_type(stmt, rhs1);
@@ -88508,7 +88543,7 @@ index 0000000..244559e
+ }
+
+ if (!gimple_assign_cast_p(stmt) || check_undefined_integer_operation(stmt))
-+ return follow_overflow_type_and_dup(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
+
+ size_overflow_type = get_size_overflow_type(stmt, rhs1);
+ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
@@ -88693,8 +88728,8 @@ index 0000000..244559e
+ cast_rhs_type = TREE_TYPE(cast_rhs);
+ type_max_type = TREE_TYPE(type_max);
+ type_min_type = TREE_TYPE(type_min);
-+ gcc_assert(useless_type_conversion_p(cast_rhs_type, type_max_type));
-+ gcc_assert(useless_type_conversion_p(type_max_type, type_min_type));
++ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
++ gcc_assert(types_compatible_p(type_max_type, type_min_type));
+
+ insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false);
+ insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true);
@@ -88722,7 +88757,7 @@ index 0000000..244559e
+ if (gimple_assign_rhs_code(def_stmt) == RSHIFT_EXPR)
+ return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
+
-+ if (!useless_type_conversion_p(lhs_type, rhs1_type) || !useless_type_conversion_p(rhs1_type, rhs2_type)) {
++ if (!types_compatible_p(lhs_type, rhs1_type) || !useless_type_conversion_p(rhs1_type, rhs2_type)) {
+ debug_gimple_stmt(def_stmt);
+ gcc_unreachable();
+ }
@@ -88739,64 +88774,20 @@ index 0000000..244559e
+ return true;
+}
+
-+static tree get_cast_def_stmt_rhs(const_tree new_rhs)
-+{
-+ gimple def_stmt;
-+
-+ def_stmt = get_def_stmt(new_rhs);
-+ // get_size_overflow_type
-+ if (LONG_TYPE_SIZE != GET_MODE_BITSIZE(SImode))
-+ gcc_assert(gimple_assign_cast_p(def_stmt));
-+ return gimple_assign_rhs1(def_stmt);
-+}
-+
-+static tree cast_to_int_TI_type_and_check(gimple stmt, tree new_rhs)
-+{
-+ gimple_stmt_iterator gsi;
-+ const_gimple cast_stmt;
-+ gimple def_stmt;
-+ enum machine_mode mode = TYPE_MODE(TREE_TYPE(new_rhs));
-+
-+ if (mode != TImode && mode != DImode) {
-+ def_stmt = get_def_stmt(new_rhs);
-+ gcc_assert(gimple_assign_cast_p(def_stmt));
-+ new_rhs = gimple_assign_rhs1(def_stmt);
-+ mode = TYPE_MODE(TREE_TYPE(new_rhs));
-+ }
-+
-+ gcc_assert(mode == TImode || mode == DImode);
-+
-+ if (mode == TYPE_MODE(intTI_type_node) && useless_type_conversion_p(TREE_TYPE(new_rhs), intTI_type_node))
-+ return new_rhs;
-+
-+ gsi = gsi_for_stmt(stmt);
-+ cast_stmt = build_cast_stmt(intTI_type_node, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
-+ new_rhs = gimple_get_lhs(cast_stmt);
-+
-+ if (mode == DImode)
-+ return new_rhs;
-+
-+ check_size_overflow(stmt, intTI_type_node, new_rhs, new_rhs, BEFORE_STMT);
-+
-+ return new_rhs;
-+}
-+
-+static bool is_an_integer_trunction(const_gimple stmt)
++static bool is_subtraction_special(const_gimple stmt)
+{
+ gimple rhs1_def_stmt, rhs2_def_stmt;
-+ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1;
-+ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode;
++ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
++ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
+ const_tree rhs1 = gimple_assign_rhs1(stmt);
+ const_tree rhs2 = gimple_assign_rhs2(stmt);
-+ enum machine_mode rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1));
-+ enum machine_mode rhs2_mode = TYPE_MODE(TREE_TYPE(rhs2));
+
+ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
+ return false;
+
+ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
+
-+ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR || rhs1_mode != SImode || rhs2_mode != SImode)
++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
+ return false;
+
+ rhs1_def_stmt = get_def_stmt(rhs1);
@@ -88806,9 +88797,15 @@ index 0000000..244559e
+
+ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
+ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
++ rhs1_def_stmt_lhs = gimple_get_lhs(rhs1_def_stmt);
++ rhs2_def_stmt_lhs = gimple_get_lhs(rhs2_def_stmt);
+ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
+ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
-+ if (rhs1_def_stmt_rhs1_mode != DImode || rhs2_def_stmt_rhs1_mode != DImode)
++ rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
++ rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
++ if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
++ return false;
++ if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
+ return false;
+
+ gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
@@ -88816,37 +88813,63 @@ index 0000000..244559e
+ return true;
+}
+
++static tree get_def_stmt_rhs(const_tree var)
++{
++ tree rhs1, def_stmt_rhs1;
++ gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
++
++ def_stmt = get_def_stmt(var);
++ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && gimple_plf(def_stmt, MY_STMT) && gimple_assign_cast_p(def_stmt));
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs1_def_stmt = get_def_stmt(rhs1);
++ gcc_assert(gimple_code(rhs1_def_stmt) != GIMPLE_NOP);
++ if (!gimple_assign_cast_p(rhs1_def_stmt))
++ return rhs1;
++
++ def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
++
++ switch (gimple_code(def_stmt_rhs1_def_stmt)) {
++ case GIMPLE_CALL:
++ case GIMPLE_NOP:
++ case GIMPLE_ASM:
++ return def_stmt_rhs1;
++ case GIMPLE_ASSIGN:
++ return rhs1;
++ default:
++ debug_gimple_stmt(def_stmt_rhs1_def_stmt);
++ gcc_unreachable();
++ }
++}
++
+static tree handle_integer_truncation(struct pointer_set_t *visited, const_tree lhs)
+{
+ tree new_rhs1, new_rhs2;
+ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
-+ tree new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type;
+ gimple assign, stmt = get_def_stmt(lhs);
+ tree rhs1 = gimple_assign_rhs1(stmt);
+ tree rhs2 = gimple_assign_rhs2(stmt);
+
-+ if (!is_an_integer_trunction(stmt))
++ if (!is_subtraction_special(stmt))
+ return NULL_TREE;
+
+ new_rhs1 = expand(visited, rhs1);
+ new_rhs2 = expand(visited, rhs2);
+
-+ new_rhs1_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs1);
-+ new_rhs2_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs2);
-+
-+ new_rhs1_def_stmt_rhs1_type = TREE_TYPE(new_rhs1_def_stmt_rhs1);
-+ new_rhs2_def_stmt_rhs1_type = TREE_TYPE(new_rhs2_def_stmt_rhs1);
++ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
++ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
+
-+ if (!useless_type_conversion_p(new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type)) {
-+ new_rhs1_def_stmt_rhs1 = cast_to_int_TI_type_and_check(stmt, new_rhs1_def_stmt_rhs1);
-+ new_rhs2_def_stmt_rhs1 = cast_to_int_TI_type_and_check(stmt, new_rhs2_def_stmt_rhs1);
++ if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
++ new_rhs1_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs1_def_stmt_rhs1);
++ new_rhs2_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs2_def_stmt_rhs1);
+ }
+
+ assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
+ new_lhs = gimple_get_lhs(assign);
+ check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
+
-+ return follow_overflow_type_and_dup(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++ return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
+}
+
+static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
@@ -88949,7 +88972,7 @@ index 0000000..244559e
+ if (is_a_constant_overflow(def_stmt, rhs1))
+ return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, NULL_TREE, new_rhs2);
+
-+ return follow_overflow_type_and_dup(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
+}
+
+#if BUILDING_GCC_VERSION >= 4007
@@ -88976,7 +88999,7 @@ index 0000000..244559e
+ new_rhs2 = get_new_rhs(visited, size_overflow_type, rhs2);
+ new_rhs3 = get_new_rhs(visited, size_overflow_type, rhs3);
+
-+ return follow_overflow_type_and_dup(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
++ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
+}
+#endif
+