summaryrefslogtreecommitdiffstats
path: root/main/linux-grsec
diff options
context:
space:
mode:
authorNatanael Copa <ncopa@alpinelinux.org>2012-03-12 12:38:13 +0000
committerNatanael Copa <ncopa@alpinelinux.org>2012-03-12 15:33:54 +0000
commitce55fee0bbc4ab5a81b930d244d716a3c4273983 (patch)
tree6e563f0f3e2d6ea7ce8d5348108092d3f08dae32 /main/linux-grsec
parent4fc2408b8f327b1313c349b63f9eec60d8cfe13d (diff)
downloadaports-ce55fee0bbc4ab5a81b930d244d716a3c4273983.tar.bz2
aports-ce55fee0bbc4ab5a81b930d244d716a3c4273983.tar.xz
main/linux-grsec: upgrade to 3.2.9 update mtu fix patch
The mtu regression seems to have been fixed upstream. We backport.
Diffstat (limited to 'main/linux-grsec')
-rw-r--r--main/linux-grsec/APKBUILD16
-rw-r--r--main/linux-grsec/grsecurity-2.9-3.2.9-201203112136.patch (renamed from main/linux-grsec/grsecurity-2.9-3.2.7-201202261954.patch)2131
-rw-r--r--main/linux-grsec/inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch174
-rw-r--r--main/linux-grsec/linux-3.0.x-regression-with-ipv4-routes-having-mtu.patch53
-rw-r--r--main/linux-grsec/route-remove-redirect-genid.patch81
5 files changed, 2036 insertions, 419 deletions
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index 6eba993dc..13a6bed57 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -2,9 +2,9 @@
_flavor=grsec
pkgname=linux-${_flavor}
-pkgver=3.2.7
+pkgver=3.2.9
_kernver=3.2
-pkgrel=1
+pkgrel=0
pkgdesc="Linux kernel with grsecurity"
url=http://grsecurity.net
depends="mkinitfs linux-firmware"
@@ -14,12 +14,13 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://ftp.kernel.org/pub/linux/kernel/v3.0/linux-$_kernver.tar.bz2
http://ftp.kernel.org/pub/linux/kernel/v3.0/patch-$pkgver.bz2
- grsecurity-2.9-3.2.7-201202261954.patch
+ grsecurity-2.9-3.2.9-201203112136.patch
0004-arp-flush-arp-cache-on-device-change.patch
x86-centaur-enable-cx8-for-via-eden-too.patch
- linux-3.0.x-regression-with-ipv4-routes-having-mtu.patch
+ inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch
+ route-remove-redirect-genid.patch
kernelconfig.x86
kernelconfig.x86_64
@@ -140,10 +141,11 @@ dev() {
}
md5sums="7ceb61f87c097fc17509844b71268935 linux-3.2.tar.bz2
-899624bffed6a19578613b672cc9483f patch-3.2.7.bz2
-27f2e8898e796ff0301f3193e2ba76b3 grsecurity-2.9-3.2.7-201202261954.patch
+4610f3e62a5446422d1e81a90ab3cd06 patch-3.2.9.bz2
+349de864a65ad6714e20bf8a14dd8756 grsecurity-2.9-3.2.9-201203112136.patch
776adeeb5272093574f8836c5037dd7d 0004-arp-flush-arp-cache-on-device-change.patch
f3eda7112ef074a4121ec6de943c63ee x86-centaur-enable-cx8-for-via-eden-too.patch
-62cc7d7b5ba7ef05b72ff91c0411c189 linux-3.0.x-regression-with-ipv4-routes-having-mtu.patch
+0e57daa3b43acadd82ae66fa9e3f7da1 inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch
+06061e5de624849e082c3c8dbe37c908 route-remove-redirect-genid.patch
a4e7d46b18ca1495a1605c8520d74ee3 kernelconfig.x86
147306257d376f27799e9e72a303c80c kernelconfig.x86_64"
diff --git a/main/linux-grsec/grsecurity-2.9-3.2.7-201202261954.patch b/main/linux-grsec/grsecurity-2.9-3.2.9-201203112136.patch
index 5978e7ccc..f67d96792 100644
--- a/main/linux-grsec/grsecurity-2.9-3.2.7-201202261954.patch
+++ b/main/linux-grsec/grsecurity-2.9-3.2.9-201203112136.patch
@@ -1,5 +1,5 @@
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
-index dfa6fc6..0095943 100644
+index dfa6fc6..0aa3907 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -5,6 +5,7 @@
@@ -96,15 +96,16 @@ index dfa6fc6..0095943 100644
keywords.c
ksym.c*
ksym.h*
-@@ -154,7 +168,6 @@ kxgettext
+@@ -154,7 +168,7 @@ kxgettext
lkc_defs.h
lex.c
lex.*.c
-linux
++lib1funcs.S
logo_*.c
logo_*_clut224.c
logo_*_mono.c
-@@ -166,14 +179,15 @@ machtypes.h
+@@ -166,14 +180,15 @@ machtypes.h
map
map_hugetlb
maui_boot.h
@@ -121,7 +122,7 @@ index dfa6fc6..0095943 100644
mkprep
mkregtable
mktables
-@@ -209,6 +223,7 @@ r300_reg_safe.h
+@@ -209,6 +224,7 @@ r300_reg_safe.h
r420_reg_safe.h
r600_reg_safe.h
recordmcount
@@ -129,7 +130,7 @@ index dfa6fc6..0095943 100644
relocs
rlim_names.h
rn50_reg_safe.h
-@@ -219,6 +234,7 @@ setup
+@@ -219,6 +235,7 @@ setup
setup.bin
setup.elf
sImage
@@ -137,7 +138,7 @@ index dfa6fc6..0095943 100644
sm_tbl*
split-include
syscalltab.h
-@@ -229,6 +245,7 @@ tftpboot.img
+@@ -229,6 +246,7 @@ tftpboot.img
timeconst.h
times.h*
trix_boot.h
@@ -145,7 +146,7 @@ index dfa6fc6..0095943 100644
utsrelease.h*
vdso-syms.lds
vdso.lds
-@@ -246,7 +263,9 @@ vmlinux
+@@ -246,7 +264,9 @@ vmlinux
vmlinux-*
vmlinux.aout
vmlinux.bin.all
@@ -155,7 +156,7 @@ index dfa6fc6..0095943 100644
vmlinuz
voffset.h
vsyscall.lds
-@@ -254,9 +273,11 @@ vsyscall_32.lds
+@@ -254,9 +274,11 @@ vsyscall_32.lds
wanxlfw.inc
uImage
unifdef
@@ -186,7 +187,7 @@ index 81c287f..d456d02 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index d1bdc90..c985d2a 100644
+index 5f1739b..abd56ea 100644
--- a/Makefile
+++ b/Makefile
@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -212,7 +213,7 @@ index d1bdc90..c985d2a 100644
$(Q)$(MAKE) $(build)=scripts/basic
$(Q)rm -f .tmp_quiet_recordmcount
-@@ -564,6 +565,48 @@ else
+@@ -564,6 +565,50 @@ else
KBUILD_CFLAGS += -O2
endif
@@ -238,7 +239,9 @@ index d1bdc90..c985d2a 100644
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
-+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS)
++COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
++GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
+ifeq ($(KBUILD_EXTMOD),)
@@ -261,7 +264,7 @@ index d1bdc90..c985d2a 100644
include $(srctree)/arch/$(SRCARCH)/Makefile
ifneq ($(CONFIG_FRAME_WARN),0)
-@@ -708,7 +751,7 @@ export mod_strip_cmd
+@@ -708,7 +753,7 @@ export mod_strip_cmd
ifeq ($(KBUILD_EXTMOD),)
@@ -270,7 +273,7 @@ index d1bdc90..c985d2a 100644
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -932,6 +975,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
+@@ -932,6 +977,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
@@ -279,7 +282,7 @@ index d1bdc90..c985d2a 100644
$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
# Handle descending into subdirectories listed in $(vmlinux-dirs)
-@@ -941,7 +986,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+@@ -941,7 +988,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
# Error messages still appears in the original language
PHONY += $(vmlinux-dirs)
@@ -288,7 +291,7 @@ index d1bdc90..c985d2a 100644
$(Q)$(MAKE) $(build)=$@
# Store (new) KERNELRELASE string in include/config/kernel.release
-@@ -985,6 +1030,7 @@ prepare0: archprepare FORCE
+@@ -985,6 +1032,7 @@ prepare0: archprepare FORCE
$(Q)$(MAKE) $(build)=.
# All the preparing..
@@ -296,7 +299,7 @@ index d1bdc90..c985d2a 100644
prepare: prepare0
# Generate some files
-@@ -1086,6 +1132,8 @@ all: modules
+@@ -1086,6 +1134,8 @@ all: modules
# using awk while concatenating to the final file.
PHONY += modules
@@ -305,7 +308,7 @@ index d1bdc90..c985d2a 100644
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
@$(kecho) ' Building modules, stage 2.';
-@@ -1101,7 +1149,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+@@ -1101,7 +1151,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
# Target to prepare building external modules
PHONY += modules_prepare
@@ -314,7 +317,7 @@ index d1bdc90..c985d2a 100644
# Target to install modules
PHONY += modules_install
-@@ -1198,6 +1246,7 @@ distclean: mrproper
+@@ -1198,6 +1248,7 @@ distclean: mrproper
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
-o -name '.*.rej' \
@@ -322,7 +325,7 @@ index d1bdc90..c985d2a 100644
-o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
-type f -print | xargs rm -f
-@@ -1358,6 +1407,8 @@ PHONY += $(module-dirs) modules
+@@ -1358,6 +1409,8 @@ PHONY += $(module-dirs) modules
$(module-dirs): crmodverdir $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
@@ -331,7 +334,7 @@ index d1bdc90..c985d2a 100644
modules: $(module-dirs)
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1484,17 +1535,21 @@ else
+@@ -1484,17 +1537,21 @@ else
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
endif
@@ -357,7 +360,7 @@ index d1bdc90..c985d2a 100644
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1504,11 +1559,15 @@ endif
+@@ -1504,11 +1561,15 @@ endif
$(cmd_crmodverdir)
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir)
@@ -396,6 +399,32 @@ index 640f909..48b6597 100644
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
+diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
+index ad368a9..fbe0f25 100644
+--- a/arch/alpha/include/asm/cache.h
++++ b/arch/alpha/include/asm/cache.h
+@@ -4,19 +4,19 @@
+ #ifndef __ARCH_ALPHA_CACHE_H
+ #define __ARCH_ALPHA_CACHE_H
+
++#include <linux/const.h>
+
+ /* Bytes per L1 (data) cache line. */
+ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
+-# define L1_CACHE_BYTES 64
+ # define L1_CACHE_SHIFT 6
+ #else
+ /* Both EV4 and EV5 are write-through, read-allocate,
+ direct-mapped, physical.
+ */
+-# define L1_CACHE_BYTES 32
+ # define L1_CACHE_SHIFT 5
+ #endif
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+ #endif
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index da5449e..7418343 100644
--- a/arch/alpha/include/asm/elf.h
@@ -645,7 +674,7 @@ index fadd5f8..904e73a 100644
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index 86976d0..6610950 100644
+index 86976d0..35bff41 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -15,6 +15,10 @@
@@ -659,7 +688,163 @@ index 86976d0..6610950 100644
#define ATOMIC_INIT(i) { (i) }
#ifdef __KERNEL__
-@@ -239,6 +243,14 @@ typedef struct {
+@@ -27,6 +31,9 @@
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
+ #define atomic_set(v,i) (((v)->counter) = (i))
+
++#define atomic_read_unchecked(v) (*(volatile int *)&(v)->counter)
++#define atomic_set_unchecked(v,i) (((v)->counter) = (i))
++
+ #if __LINUX_ARM_ARCH__ >= 6
+
+ /*
+@@ -42,6 +49,28 @@ static inline void atomic_add(int i, atomic_t *v)
+ __asm__ __volatile__("@ atomic_add\n"
+ "1: ldrex %0, [%3]\n"
+ " add %0, %0, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "bvc 2f\n"
++ "\tbkpt 0xf103\n2:\n"
++#endif
++
++" strex %1, %0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++}
++
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ __asm__ __volatile__("@ atomic_add_unchecked\n"
++"1: ldrex %0, [%3]\n"
++" add %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+@@ -60,6 +89,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
+ __asm__ __volatile__("@ atomic_add_return\n"
+ "1: ldrex %0, [%3]\n"
+ " add %0, %0, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "bvc 2f\n"
++ "\tbkpt 0xf103\n2:\n"
++#endif
++
++" strex %1, %0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++
++ smp_mb();
++
++ return result;
++}
++
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ smp_mb();
++
++ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
++"1: ldrex %0, [%3]\n"
++" add %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+@@ -80,6 +137,28 @@ static inline void atomic_sub(int i, atomic_t *v)
+ __asm__ __volatile__("@ atomic_sub\n"
+ "1: ldrex %0, [%3]\n"
+ " sub %0, %0, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "bvc 2f\n"
++ "\tbkpt 0xf103\n2:\n"
++#endif
++
++" strex %1, %0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++}
++
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ __asm__ __volatile__("@ atomic_sub_unchecked\n"
++"1: ldrex %0, [%3]\n"
++" sub %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+@@ -132,6 +211,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+ return oldval;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
++{
++ unsigned long oldval, res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
++ "ldrex %1, [%3]\n"
++ "mov %0, #0\n"
++ "teq %1, %4\n"
++ "strexeq %0, %5, [%3]\n"
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "Ir" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ {
+ unsigned long tmp, tmp2;
+@@ -207,6 +308,7 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ #endif /* __LINUX_ARM_ARCH__ */
+
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++#define atomic_xchg_unchecked(v, new) (xchg_unchecked(&((v)->counter), new))
+
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+@@ -219,11 +321,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ }
+
+ #define atomic_inc(v) atomic_add(1, v)
++#define atomic_inc_unchecked(v) atomic_add_unchecked(1, v)
+ #define atomic_dec(v) atomic_sub(1, v)
++#define atomic_dec_unchecked(v) atomic_sub_unchecked(1, v)
+
+ #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
++#define atomic_inc_and_test_unchecked(v) (atomic_add_return_unchecked(1, v) == 0)
+ #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++#define atomic_inc_return_unchecked(v) (atomic_add_return_unchecked(1, v))
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+@@ -239,6 +345,14 @@ typedef struct {
u64 __aligned(8) counter;
} atomic64_t;
@@ -674,23 +859,217 @@ index 86976d0..6610950 100644
#define ATOMIC64_INIT(i) { (i) }
static inline u64 atomic64_read(atomic64_t *v)
-@@ -459,6 +471,16 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+@@ -254,6 +368,19 @@ static inline u64 atomic64_read(atomic64_t *v)
+ return result;
+ }
+
++static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
++{
++ u64 result;
++
++ __asm__ __volatile__("@ atomic64_read_unchecked\n"
++" ldrexd %0, %H0, [%1]"
++ : "=&r" (result)
++ : "r" (&v->counter), "Qo" (v->counter)
++ );
++
++ return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, u64 i)
+ {
+ u64 tmp;
+@@ -268,6 +395,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
+ : "cc");
+ }
+
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
++{
++ u64 tmp;
++
++ __asm__ __volatile__("@ atomic64_set_unchecked\n"
++"1: ldrexd %0, %H0, [%2]\n"
++" strexd %0, %3, %H3, [%2]\n"
++" teq %0, #0\n"
++" bne 1b"
++ : "=&r" (tmp), "=Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
++
+ static inline void atomic64_add(u64 i, atomic64_t *v)
+ {
+ u64 result;
+@@ -277,6 +418,29 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
+ "1: ldrexd %0, %H0, [%3]\n"
+ " adds %0, %0, %4\n"
+ " adc %H0, %H0, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "bvc 2f\n"
++ "\tbkpt 0xf103\n2:\n"
++#endif
++
++" strexd %1, %0, %H0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
++
++static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
++{
++ u64 result;
++ unsigned long tmp;
++
++ __asm__ __volatile__("@ atomic64_add_unchecked\n"
++"1: ldrexd %0, %H0, [%3]\n"
++" adds %0, %0, %4\n"
++" adc %H0, %H0, %H4\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+@@ -296,6 +460,35 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+ "1: ldrexd %0, %H0, [%3]\n"
+ " adds %0, %0, %4\n"
+ " adc %H0, %H0, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "bvc 2f\n"
++ "\tbkpt 0xf103\n2:\n"
++#endif
++
++" strexd %1, %0, %H0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++
++ smp_mb();
++
++ return result;
++}
++
++static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
++{
++ u64 result;
++ unsigned long tmp;
++
++ smp_mb();
++
++ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
++"1: ldrexd %0, %H0, [%3]\n"
++" adds %0, %0, %4\n"
++" adc %H0, %H0, %H4\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+@@ -317,6 +510,29 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
+ "1: ldrexd %0, %H0, [%3]\n"
+ " subs %0, %0, %4\n"
+ " sbc %H0, %H0, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "bvc 2f\n"
++ "\tbkpt 0xf103\n2:\n"
++#endif
++
++" strexd %1, %0, %H0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
++
++static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
++{
++ u64 result;
++ unsigned long tmp;
++
++ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
++"1: ldrexd %0, %H0, [%3]\n"
++" subs %0, %0, %4\n"
++" sbc %H0, %H0, %H4\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+@@ -372,6 +588,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+ return oldval;
+ }
+
++static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
++{
++ u64 oldval;
++ unsigned long res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
++ "ldrexd %1, %H1, [%3]\n"
++ "mov %0, #0\n"
++ "teq %1, %4\n"
++ "teqeq %H1, %H4\n"
++ "strexdeq %0, %5, %H5, [%3]"
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "r" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+ {
+ u64 result;
+@@ -451,10 +691,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+
+ #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+ #define atomic64_inc(v) atomic64_add(1LL, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
+ #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+ #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+ #define atomic64_dec(v) atomic64_sub(1LL, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
+ #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
+index 75fe66b..2255c86 100644
+--- a/arch/arm/include/asm/cache.h
++++ b/arch/arm/include/asm/cache.h
+@@ -4,8 +4,10 @@
+ #ifndef __ASMARM_CACHE_H
+ #define __ASMARM_CACHE_H
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++#include <linux/const.h>
+
- #endif /* !CONFIG_GENERIC_ATOMIC64 */
- #endif
- #endif
+ #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index d5d8d5c..ad92c96 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -108,7 +108,7 @@ struct cpu_cache_fns {
+ void (*dma_unmap_area)(const void *, size_t, int);
+
+ void (*dma_flush_range)(const void *, const void *);
+-};
++} __no_const;
+
+ /*
+ * Select the calling method
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 0e9ce8d..6ef1e03 100644
--- a/arch/arm/include/asm/elf.h
@@ -734,6 +1113,54 @@ index e51b1e8..32a3113 100644
KM_TYPE_NR
};
+diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
+index 53426c6..c7baff3 100644
+--- a/arch/arm/include/asm/outercache.h
++++ b/arch/arm/include/asm/outercache.h
+@@ -35,7 +35,7 @@ struct outer_cache_fns {
+ #endif
+ void (*set_debug)(unsigned long);
+ void (*resume)(void);
+-};
++} __no_const;
+
+ #ifdef CONFIG_OUTER_CACHE
+
+diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
+index ca94653..6ac0d56 100644
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -123,7 +123,7 @@ struct cpu_user_fns {
+ void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+ void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+-};
++} __no_const;
+
+ #ifdef MULTI_USER
+ extern struct cpu_user_fns cpu_user;
+diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
+index 984014b..92345b9 100644
+--- a/arch/arm/include/asm/system.h
++++ b/arch/arm/include/asm/system.h
+@@ -90,6 +90,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
+
+ #define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
++#define xchg_unchecked(ptr,x) \
++ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+ extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+@@ -101,7 +103,7 @@ extern int __pure cpu_architecture(void);
+ extern void cpu_init(void);
+
+ void arm_machine_restart(char mode, const char *cmd);
+-extern void (*arm_pm_restart)(char str, const char *cmd);
++extern void (*arm_pm_restart)(char str, const char *cmd) __noreturn;
+
+ #define UDBG_UNDEFINED (1 << 0)
+ #define UDBG_SYSCALL (1 << 1)
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index b293616..96310e5 100644
--- a/arch/arm/include/asm/uaccess.h
@@ -809,7 +1236,7 @@ index 5b0bce6..becd81c 100644
EXPORT_SYMBOL(__get_user_1);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 3d0c6fb..3dcae52 100644
+index 3d0c6fb..9d326fa 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -28,7 +28,6 @@
@@ -820,7 +1247,33 @@ index 3d0c6fb..3dcae52 100644
#include <linux/hw_breakpoint.h>
#include <linux/cpuidle.h>
-@@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -92,7 +91,7 @@ static int __init hlt_setup(char *__unused)
+ __setup("nohlt", nohlt_setup);
+ __setup("hlt", hlt_setup);
+
+-void arm_machine_restart(char mode, const char *cmd)
++__noreturn void arm_machine_restart(char mode, const char *cmd)
+ {
+ /* Disable interrupts first */
+ local_irq_disable();
+@@ -134,7 +133,7 @@ void arm_machine_restart(char mode, const char *cmd)
+ void (*pm_power_off)(void);
+ EXPORT_SYMBOL(pm_power_off);
+
+-void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
++void (*arm_pm_restart)(char str, const char *cmd) __noreturn = arm_machine_restart;
+ EXPORT_SYMBOL_GPL(arm_pm_restart);
+
+ static void do_nothing(void *unused)
+@@ -248,6 +247,7 @@ void machine_power_off(void)
+ machine_shutdown();
+ if (pm_power_off)
+ pm_power_off();
++ BUG();
+ }
+
+ void machine_restart(char *cmd)
+@@ -484,12 +484,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
@@ -833,6 +1286,27 @@ index 3d0c6fb..3dcae52 100644
#ifdef CONFIG_MMU
/*
* The vectors page is always readable from user space for the
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 8fc2c8f..064c150 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -108,13 +108,13 @@ struct processor processor __read_mostly;
+ struct cpu_tlb_fns cpu_tlb __read_mostly;
+ #endif
+ #ifdef MULTI_USER
+-struct cpu_user_fns cpu_user __read_mostly;
++struct cpu_user_fns cpu_user __read_only;
+ #endif
+ #ifdef MULTI_CACHE
+-struct cpu_cache_fns cpu_cache __read_mostly;
++struct cpu_cache_fns cpu_cache __read_only;
+ #endif
+ #ifdef CONFIG_OUTER_CACHE
+-struct outer_cache_fns outer_cache __read_mostly;
++struct outer_cache_fns outer_cache __read_only;
+ EXPORT_SYMBOL(outer_cache);
+ #endif
+
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 99a5727..a3d5bb1 100644
--- a/arch/arm/kernel/traps.c
@@ -883,6 +1357,18 @@ index 66a477a..bee61d3 100644
.pushsection .fixup,"ax"
.align 0
+diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
+index 6ee2f67..d1cce76 100644
+--- a/arch/arm/lib/copy_page.S
++++ b/arch/arm/lib/copy_page.S
+@@ -10,6 +10,7 @@
+ * ASM optimised string functions
+ */
+ #include <linux/linkage.h>
++#include <linux/const.h>
+ #include <asm/assembler.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/cache.h>
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index d066df6..df28194 100644
--- a/arch/arm/lib/copy_to_user.S
@@ -1098,6 +1584,48 @@ index 44b628e..623ee2a 100644
/*
* Remember the place where we stopped the search:
*/
+diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
+index 4c1a363..df311d0 100644
+--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
++++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
+@@ -41,7 +41,7 @@ struct samsung_dma_ops {
+ int (*started)(unsigned ch);
+ int (*flush)(unsigned ch);
+ int (*stop)(unsigned ch);
+-};
++} __no_const;
+
+ extern void *samsung_dmadev_get_ops(void);
+ extern void *s3c_dma_get_ops(void);
+diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
+index 5f28cae..3d23723 100644
+--- a/arch/arm/plat-samsung/include/plat/ehci.h
++++ b/arch/arm/plat-samsung/include/plat/ehci.h
+@@ -14,7 +14,7 @@
+ struct s5p_ehci_platdata {
+ int (*phy_init)(struct platform_device *pdev, int type);
+ int (*phy_exit)(struct platform_device *pdev, int type);
+-};
++} __no_const;
+
+ extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
+
+diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
+index c3a58a1..78fbf54 100644
+--- a/arch/avr32/include/asm/cache.h
++++ b/arch/avr32/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef __ASM_AVR32_CACHE_H
+ #define __ASM_AVR32_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
index 3b3159b..425ea94 100644
--- a/arch/avr32/include/asm/elf.h
@@ -1177,6 +1705,60 @@ index f7040a1..db9f300 100644
if (exception_trace && printk_ratelimit())
printk("%s%s[%d]: segfault at %08lx pc %08lx "
"sp %08lx ecr %lu\n",
+diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
+index 568885a..f8008df 100644
+--- a/arch/blackfin/include/asm/cache.h
++++ b/arch/blackfin/include/asm/cache.h
+@@ -7,6 +7,7 @@
+ #ifndef __ARCH_BLACKFIN_CACHE_H
+ #define __ARCH_BLACKFIN_CACHE_H
+
++#include <linux/const.h>
+ #include <linux/linkage.h> /* for asmlinkage */
+
+ /*
+@@ -14,7 +15,7 @@
+ * Blackfin loads 32 bytes for cache
+ */
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
+index aea2718..3639a60 100644
+--- a/arch/cris/include/arch-v10/arch/cache.h
++++ b/arch/cris/include/arch-v10/arch/cache.h
+@@ -1,8 +1,9 @@
+ #ifndef _ASM_ARCH_CACHE_H
+ #define _ASM_ARCH_CACHE_H
+
++#include <linux/const.h>
+ /* Etrax 100LX have 32-byte cache-lines. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_ARCH_CACHE_H */
+diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
+index 1de779f..336fad3 100644
+--- a/arch/cris/include/arch-v32/arch/cache.h
++++ b/arch/cris/include/arch-v32/arch/cache.h
+@@ -1,11 +1,12 @@
+ #ifndef _ASM_CRIS_ARCH_CACHE_H
+ #define _ASM_CRIS_ARCH_CACHE_H
+
++#include <linux/const.h>
+ #include <arch/hwregs/dma.h>
+
+ /* A cache-line is 32 bytes. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 0d8a7d6..d0c9ff5 100644
--- a/arch/frv/include/asm/atomic.h
@@ -1198,6 +1780,23 @@ index 0d8a7d6..d0c9ff5 100644
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
+diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
+index 2797163..c2a401d 100644
+--- a/arch/frv/include/asm/cache.h
++++ b/arch/frv/include/asm/cache.h
+@@ -12,10 +12,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
+
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+ #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
index f8e16b2..c73ff79 100644
--- a/arch/frv/include/asm/kmap_types.h
@@ -1242,6 +1841,40 @@ index 385fd30..6c3d97e 100644
goto success;
addr = vma->vm_end;
}
+diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
+index c635028..6d9445a 100644
+--- a/arch/h8300/include/asm/cache.h
++++ b/arch/h8300/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef __ARCH_H8300_CACHE_H
+ #define __ARCH_H8300_CACHE_H
+
++#include <linux/const.h>
++
+ /* bytes per L1 cache line */
+-#define L1_CACHE_BYTES 4
++#define L1_CACHE_BYTES _AC(4,UL)
+
+ /* m68k-elf-gcc 2.95.2 doesn't like these */
+
+diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
+index 0f01de2..d37d309 100644
+--- a/arch/hexagon/include/asm/cache.h
++++ b/arch/hexagon/include/asm/cache.h
+@@ -21,9 +21,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
++
+ /* Bytes per L1 cache line */
+-#define L1_CACHE_SHIFT (5)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
+ #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 3fad89e..3047da5 100644
--- a/arch/ia64/include/asm/atomic.h
@@ -1263,6 +1896,27 @@ index 3fad89e..3047da5 100644
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
+diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
+index 988254a..e1ee885 100644
+--- a/arch/ia64/include/asm/cache.h
++++ b/arch/ia64/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_IA64_CACHE_H
+ #define _ASM_IA64_CACHE_H
+
++#include <linux/const.h>
+
+ /*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+@@ -9,7 +10,7 @@
+
+ /* Bytes per L1 (data) cache line. */
+ #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #ifdef CONFIG_SMP
+ # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index b5298eb..67c6e62 100644
--- a/arch/ia64/include/asm/elf.h
@@ -1578,6 +2232,22 @@ index 00cb0e2..2ad8024 100644
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
+diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
+index 40b3ee9..8c2c112 100644
+--- a/arch/m32r/include/asm/cache.h
++++ b/arch/m32r/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef _ASM_M32R_CACHE_H
+ #define _ASM_M32R_CACHE_H
+
++#include <linux/const.h>
++
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_M32R_CACHE_H */
diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
index 82abd15..d95ae5d 100644
--- a/arch/m32r/lib/usercopy.c
@@ -1602,6 +2272,41 @@ index 82abd15..d95ae5d 100644
prefetchw(to);
if (access_ok(VERIFY_READ, from, n))
__copy_user_zeroing(to,from,n);
+diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
+index 0395c51..5f26031 100644
+--- a/arch/m68k/include/asm/cache.h
++++ b/arch/m68k/include/asm/cache.h
+@@ -4,9 +4,11 @@
+ #ifndef __ARCH_M68K_CACHE_H
+ #define __ARCH_M68K_CACHE_H
+
++#include <linux/const.h>
++
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
+index 4efe96a..60e8699 100644
+--- a/arch/microblaze/include/asm/cache.h
++++ b/arch/microblaze/include/asm/cache.h
+@@ -13,11 +13,12 @@
+ #ifndef _ASM_MICROBLAZE_CACHE_H
+ #define _ASM_MICROBLAZE_CACHE_H
+
++#include <linux/const.h>
+ #include <asm/registers.h>
+
+ #define L1_CACHE_SHIFT 5
+ /* word-granular cache in microblaze */
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 1d93f81..67794d0 100644
--- a/arch/mips/include/asm/atomic.h
@@ -1634,6 +2339,23 @@ index 1d93f81..67794d0 100644
#endif /* CONFIG_64BIT */
/*
+diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
+index b4db69f..8f3b093 100644
+--- a/arch/mips/include/asm/cache.h
++++ b/arch/mips/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #ifndef _ASM_CACHE_H
+ #define _ASM_CACHE_H
+
++#include <linux/const.h>
+ #include <kmalloc.h>
+
+ #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 455c0ac..ad65fbe 100644
--- a/arch/mips/include/asm/elf.h
@@ -1852,6 +2574,66 @@ index 302d779..7d35bf8 100644
-
- return ret;
-}
+diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+index 967d144..db12197 100644
+--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
++++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+@@ -11,12 +11,14 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
+
++#include <linux/const.h>
++
+ /* L1 cache */
+
+ #define L1_CACHE_NWAYS 4 /* number of ways in caches */
+ #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
+-#define L1_CACHE_BYTES 16 /* bytes per entry */
+ #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
+ #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
+
+ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+index bcb5df2..84fabd2 100644
+--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
++++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+@@ -16,13 +16,15 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
+
++#include <linux/const.h>
++
+ /*
+ * L1 cache
+ */
+ #define L1_CACHE_NWAYS 4 /* number of ways in caches */
+ #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
+-#define L1_CACHE_BYTES 32 /* bytes per entry */
+ #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
+ #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
+
+ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
+index 4ce7a01..449202a 100644
+--- a/arch/openrisc/include/asm/cache.h
++++ b/arch/openrisc/include/asm/cache.h
+@@ -19,11 +19,13 @@
+ #ifndef __ASM_OPENRISC_CACHE_H
+ #define __ASM_OPENRISC_CACHE_H
+
++#include <linux/const.h>
++
+ /* FIXME: How can we replace these with values from the CPU...
+ * they shouldn't be hard-coded!
+ */
+
+-#define L1_CACHE_BYTES 16
+ #define L1_CACHE_SHIFT 4
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* __ASM_OPENRISC_CACHE_H */
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 4054b31..a10c105 100644
--- a/arch/parisc/include/asm/atomic.h
@@ -1873,6 +2655,34 @@ index 4054b31..a10c105 100644
#endif /* !CONFIG_64BIT */
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index 47f11c7..3420df2 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -5,6 +5,7 @@
+ #ifndef __ARCH_PARISC_CACHE_H
+ #define __ARCH_PARISC_CACHE_H
+
++#include <linux/const.h>
+
+ /*
+ * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
+@@ -15,13 +16,13 @@
+ * just ruin performance.
+ */
+ #ifdef CONFIG_PA20
+-#define L1_CACHE_BYTES 64
+ #define L1_CACHE_SHIFT 6
+ #else
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
+ #endif
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
++
+ #ifndef __ASSEMBLY__
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index 19f6cb1..6c78cf2 100644
--- a/arch/parisc/include/asm/elf.h
@@ -2256,6 +3066,27 @@ index 02e41b5..ec6e26c 100644
#endif /* __powerpc64__ */
#endif /* __KERNEL__ */
+diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
+index 4b50941..5605819 100644
+--- a/arch/powerpc/include/asm/cache.h
++++ b/arch/powerpc/include/asm/cache.h
+@@ -3,6 +3,7 @@
+
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+
+ /* bytes per L1 cache line */
+ #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+@@ -22,7 +23,7 @@
+ #define L1_CACHE_SHIFT 7
+ #endif
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 3bf9cca..e7457d0 100644
--- a/arch/powerpc/include/asm/elf.h
@@ -2636,6 +3467,34 @@ index cf9c69b..ebc9640 100644
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
lwz r4,_DAR(r1)
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 745c1e7..59d97a6 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -547,9 +547,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
+ host->ops = ops;
+ host->of_node = of_node_get(of_node);
+
+- if (host->ops->match == NULL)
+- host->ops->match = default_irq_host_match;
+-
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
+
+ /* If it's a legacy controller, check for duplicates and
+@@ -622,7 +619,12 @@ struct irq_host *irq_find_host(struct device_node *node)
+ */
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
+ list_for_each_entry(h, &irq_hosts, link)
+- if (h->ops->match(h, node)) {
++ if (h->ops->match) {
++ if (h->ops->match(h, node)) {
++ found = h;
++ break;
++ }
++ } else if (default_irq_host_match(h, node)) {
+ found = h;
+ break;
+ }
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 0b6d796..d760ddb 100644
--- a/arch/powerpc/kernel/module_32.c
@@ -3104,6 +3963,22 @@ index 8517d2a..d2738d4 100644
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
+diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
+index 2a30d5a..5e5586f 100644
+--- a/arch/s390/include/asm/cache.h
++++ b/arch/s390/include/asm/cache.h
+@@ -11,8 +11,10 @@
+ #ifndef __ARCH_S390_CACHE_H
+ #define __ARCH_S390_CACHE_H
+
+-#define L1_CACHE_BYTES 256
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 8
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define NET_SKB_PAD 32
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 547f1a6..0b22b53 100644
--- a/arch/s390/include/asm/elf.h
@@ -3351,6 +4226,21 @@ index f09c748..cf9ec1d 100644
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
+diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
+index ae3d59f..f65f075 100644
+--- a/arch/score/include/asm/cache.h
++++ b/arch/score/include/asm/cache.h
+@@ -1,7 +1,9 @@
+ #ifndef _ASM_SCORE_CACHE_H
+ #define _ASM_SCORE_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_SCORE_CACHE_H */
diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
index 589d5c7..669e274 100644
--- a/arch/score/include/asm/system.h
@@ -3377,6 +4267,23 @@ index 25d0803..d6c8e36 100644
-{
- return sp;
-}
+diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
+index ef9e555..331bd29 100644
+--- a/arch/sh/include/asm/cache.h
++++ b/arch/sh/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #define __ASM_SH_CACHE_H
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+ #include <linux/init.h>
+ #include <cpu/cache.h>
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index afeb710..d1d1289 100644
--- a/arch/sh/mm/mmap.c
@@ -3480,17 +4387,19 @@ index ad1fb5d..fc5315b 100644
VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
VMLINUX_MAIN += $(drivers-y) $(net-y)
-diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
-index 8ff83d8..4a459c2 100644
---- a/arch/sparc/include/asm/atomic.h
-+++ b/arch/sparc/include/asm/atomic.h
-@@ -4,5 +4,6 @@
- #include <asm/atomic_64.h>
- #else
- #include <asm/atomic_32.h>
+diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
+index 5c3c8b6..ba822fa 100644
+--- a/arch/sparc/include/asm/atomic_32.h
++++ b/arch/sparc/include/asm/atomic_32.h
+@@ -13,6 +13,8 @@
+
+ #include <linux/types.h>
+
+#include <asm-generic/atomic64.h>
- #endif
- #endif
++
+ #ifdef __KERNEL__
+
+ #include <asm/system.h>
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 9f421df..b81fc12 100644
--- a/arch/sparc/include/asm/atomic_64.h
@@ -3682,15 +4591,20 @@ index 9f421df..b81fc12 100644
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
-index 69358b5..17b4745 100644
+index 69358b5..9d0d492 100644
--- a/arch/sparc/include/asm/cache.h
+++ b/arch/sparc/include/asm/cache.h
-@@ -10,7 +10,7 @@
+@@ -7,10 +7,12 @@
+ #ifndef _SPARC_CACHE_H
+ #define _SPARC_CACHE_H
+
++#include <linux/const.h>
++
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#define L1_CACHE_SHIFT 5
-#define L1_CACHE_BYTES 32
-+#define L1_CACHE_BYTES 32UL
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
#ifdef CONFIG_SPARC32
#define SMP_CACHE_BYTES_SHIFT 5
@@ -5739,6 +6653,24 @@ index 27fe667..36d474c 100644
/* Atomic dec and inc don't implement barrier, so provide them if needed. */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
+diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
+index 392e533..536b092 100644
+--- a/arch/tile/include/asm/cache.h
++++ b/arch/tile/include/asm/cache.h
+@@ -15,11 +15,12 @@
+ #ifndef _ASM_TILE_CACHE_H
+ #define _ASM_TILE_CACHE_H
+
++#include <linux/const.h>
+ #include <arch/chip.h>
+
+ /* bytes per L1 data cache line */
+ #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /* bytes per L2 cache line */
+ #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 7730af6..cce5b19 100644
--- a/arch/um/Makefile
@@ -5754,6 +6686,26 @@ index 7730af6..cce5b19 100644
#This will adjust *FLAGS accordingly to the platform.
include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
+diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
+index 19e1bdd..3665b77 100644
+--- a/arch/um/include/asm/cache.h
++++ b/arch/um/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef __UM_CACHE_H
+ #define __UM_CACHE_H
+
++#include <linux/const.h>
+
+ #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
+ # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+@@ -12,6 +13,6 @@
+ # define L1_CACHE_SHIFT 5
+ #endif
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif
diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
index 6c03acd..a5e0215 100644
--- a/arch/um/include/asm/kmap_types.h
@@ -5807,6 +6759,23 @@ index c533835..84db18e 100644
unsigned long get_wchan(struct task_struct *p)
{
unsigned long stack_page, sp, ip;
+diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
+index ad8f795..2c7eec6 100644
+--- a/arch/unicore32/include/asm/cache.h
++++ b/arch/unicore32/include/asm/cache.h
+@@ -12,8 +12,10 @@
+ #ifndef __UNICORE_CACHE_H__
+ #define __UNICORE_CACHE_H__
+
+-#define L1_CACHE_SHIFT (5)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#include <linux/const.h>
++
++#define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index efb4294..61bc18c 100644
--- a/arch/x86/Kconfig
@@ -6839,7 +7808,7 @@ index 7bcf3fc..f53832f 100644
+ pax_force_retaddr 0, 1
ret
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
-index fd84387..0b4af7d 100644
+index fd84387..887aa7e 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
@@ -6851,6 +7820,34 @@ index fd84387..0b4af7d 100644
fs = get_fs();
set_fs(KERNEL_DS);
has_dumped = 1;
+@@ -315,6 +317,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ current->mm->free_area_cache = TASK_UNMAPPED_BASE;
+ current->mm->cached_hole_size = 0;
+
++ retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
++ if (retval < 0) {
++ /* Someone check-me: is this error path enough? */
++ send_sig(SIGKILL, current, 0);
++ return retval;
++ }
++
+ install_exec_creds(bprm);
+ current->flags &= ~PF_FORKNOEXEC;
+
+@@ -410,13 +419,6 @@ beyond_if:
+
+ set_brk(current->mm->start_brk, current->mm->brk);
+
+- retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
+- if (retval < 0) {
+- /* Someone check-me: is this error path enough? */
+- send_sig(SIGKILL, current, 0);
+- return retval;
+- }
+-
+ current->mm->start_stack =
+ (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
+ /* start thread */
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 6557769..ef6ae89 100644
--- a/arch/x86/ia32/ia32_signal.c
@@ -8830,7 +9827,7 @@ index eb92a6e..b98b2f4 100644
/* EISA */
extern void eisa_set_level_irq(unsigned int irq);
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
-index c9e09ea..73888df 100644
+index a850b4d..bae26dc 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
@@ -8857,31 +9854,15 @@ index c9e09ea..73888df 100644
/*
* Clear the bytes not touched by the fxsave and reserved
* for the SW usage.
-@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
- #endif /* CONFIG_X86_64 */
-
- /* We need a safe address that is cheap to find and that is already
-- in L1 during context switch. The best choices are unfortunately
-- different for UP and SMP */
--#ifdef CONFIG_SMP
--#define safe_address (__per_cpu_offset[0])
--#else
--#define safe_address (kstat_cpu(0).cpustat.user)
--#endif
-+ in L1 during context switch. */
-+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
+@@ -424,7 +434,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
+ static inline bool interrupted_user_mode(void)
+ {
+ struct pt_regs *regs = get_irq_regs();
+- return regs && user_mode_vm(regs);
++ return regs && user_mode(regs);
+ }
/*
- * These must be called with preempt disabled
-@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
- struct thread_info *me = current_thread_info();
- preempt_disable();
- if (me->status & TS_USEDFPU)
-- __save_init_fpu(me->task);
-+ __save_init_fpu(current);
- else
- clts();
- }
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index d8e8eef..99f81ae 100644
--- a/arch/x86/include/asm/io.h
@@ -9985,7 +10966,7 @@ index 013286a..8b42f4f 100644
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index b650435..eefa566 100644
+index bb3ee36..781a6b8 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -268,7 +268,7 @@ struct tss_struct {
@@ -9997,7 +10978,7 @@ index b650435..eefa566 100644
/*
* Save the original ist values for checking stack pointers during debugging
-@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -861,11 +861,18 @@ static inline void spin_lock_prefetch(const void *x)
*/
#define TASK_SIZE PAGE_OFFSET
#define TASK_SIZE_MAX TASK_SIZE
@@ -10018,7 +10999,7 @@ index b650435..eefa566 100644
.vm86_info = NULL, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
-@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -879,7 +886,7 @@ static inline void spin_lock_prefetch(const void *x)
*/
#define INIT_TSS { \
.x86_tss = { \
@@ -10027,7 +11008,7 @@ index b650435..eefa566 100644
.ss0 = __KERNEL_DS, \
.ss1 = __KERNEL_CS, \
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
-@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -890,11 +897,7 @@ static inline void spin_lock_prefetch(const void *x)
extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
@@ -10040,7 +11021,7 @@ index b650435..eefa566 100644
/*
* The below -8 is to reserve 8 bytes on top of the ring0 stack.
-@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -909,7 +912,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define task_pt_regs(task) \
({ \
struct pt_regs *__regs__; \
@@ -10049,7 +11030,7 @@ index b650435..eefa566 100644
__regs__ - 1; \
})
-@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -919,13 +922,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
/*
* User space process size. 47bits minus one guard page.
*/
@@ -10065,7 +11046,7 @@ index b650435..eefa566 100644
#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -936,11 +939,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define STACK_TOP_MAX TASK_SIZE_MAX
#define INIT_THREAD { \
@@ -10079,7 +11060,7 @@ index b650435..eefa566 100644
}
/*
-@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+@@ -962,6 +965,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
@@ -10610,7 +11591,7 @@ index 2d2f01c..f985723 100644
/*
* Force strict CPU ordering.
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
-index a1fe5c1..ee326d8 100644
+index d7ef849..6af292e 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -10,6 +10,7 @@
@@ -10754,7 +11735,7 @@ index a1fe5c1..ee326d8 100644
#endif
#endif /* !X86_32 */
-@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
+@@ -264,5 +240,16 @@ extern void arch_task_cache_init(void);
extern void free_thread_info(struct thread_info *ti);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define arch_task_cache_init arch_task_cache_init
@@ -16624,7 +17605,7 @@ index ee5d4fb..426649b 100644
+}
+#endif
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
-index 795b79f..063767a 100644
+index 8598296..bfadef0 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -16675,10 +17656,10 @@ index 795b79f..063767a 100644
int cpu = smp_processor_id();
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ struct tss_struct *tss = init_tss + cpu;
- bool preload_fpu;
+ fpu_switch_t fpu;
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
-@@ -331,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -320,6 +321,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
lazy_save_gs(prev->gs);
@@ -16689,32 +17670,32 @@ index 795b79f..063767a 100644
/*
* Load the per-thread Thread-Local Storage descriptor.
*/
-@@ -366,6 +371,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -350,6 +355,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
arch_end_context_switch(next_p);
+ percpu_write(current_task, next_p);
+ percpu_write(current_tinfo, &next_p->tinfo);
+
- if (preload_fpu)
- __math_state_restore();
+ /*
+ * Restore %gs if needed (which is common)
+ */
+@@ -358,8 +366,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-@@ -375,8 +383,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- if (prev->gs | next->gs)
- lazy_load_gs(next->gs);
+ switch_fpu_finish(next_p, fpu);
- percpu_write(current_task, next_p);
-
return prev_p;
}
-@@ -406,4 +412,3 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -389,4 +395,3 @@ unsigned long get_wchan(struct task_struct *p)
} while (count++ < 16);
return 0;
}
-
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
-index 3bd7e6e..90b2bcf 100644
+index 6a364a6..b147d11 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -89,7 +89,7 @@ static void __exit_idle(void)
@@ -16751,9 +17732,9 @@ index 3bd7e6e..90b2bcf 100644
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ struct tss_struct *tss = init_tss + cpu;
unsigned fsindex, gsindex;
- bool preload_fpu;
+ fpu_switch_t fpu;
-@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -461,10 +461,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
prev->usersp = percpu_read(old_rsp);
percpu_write(old_rsp, next->usersp);
percpu_write(current_task, next_p);
@@ -16766,7 +17747,7 @@ index 3bd7e6e..90b2bcf 100644
/*
* Now maybe reload the debug registers and handle I/O bitmaps
-@@ -540,12 +539,11 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -519,12 +518,11 @@ unsigned long get_wchan(struct task_struct *p)
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack = (unsigned long)task_stack_page(p);
@@ -17822,7 +18803,7 @@ index 09ff517..df19fbff 100644
.short 0
.quad 0x00cf9b000000ffff # __KERNEL32_CS
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
-index a8e3eb8..c9dbd7d 100644
+index 31d9d0f..e244dd9 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
@@ -17967,25 +18948,17 @@ index a8e3eb8..c9dbd7d 100644
{
if (!fixup_exception(regs)) {
task->thread.error_code = error_code;
-@@ -568,7 +597,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
- void __math_state_restore(void)
+@@ -569,8 +598,8 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+ void __math_state_restore(struct task_struct *tsk)
{
- struct thread_info *thread = current_thread_info();
-- struct task_struct *tsk = thread->task;
-+ struct task_struct *tsk = current;
-
- /*
- * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-@@ -595,8 +624,7 @@ void __math_state_restore(void)
- */
- asmlinkage void math_state_restore(void)
- {
-- struct thread_info *thread = current_thread_info();
-- struct task_struct *tsk = thread->task;
-+ struct task_struct *tsk = current;
+ /* We need a safe address that is cheap to find and that is already
+- in L1. We've just brought in "tsk->thread.has_fpu", so use that */
+-#define safe_address (tsk->thread.has_fpu)
++ in L1. */
++#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
- if (!tsk_used_math(tsk)) {
- local_irq_enable();
+ /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+ is pending. Clear the x87 state here by setting it to fixed
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
index b9242ba..50c5edd 100644
--- a/arch/x86/kernel/verify_cpu.S
@@ -18396,7 +19369,7 @@ index 9796c2f..f686fbf 100644
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
-index a391134..d0b63b6e 100644
+index 7110911..e8cdee5 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
@@ -18408,7 +19381,7 @@ index a391134..d0b63b6e 100644
fx_sw_user->extended_size -
FP_XSTATE_MAGIC2_SIZE));
if (err)
-@@ -267,7 +267,7 @@ fx_only:
+@@ -266,7 +266,7 @@ fx_only:
* the other extended state.
*/
xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
@@ -18417,7 +19390,7 @@ index a391134..d0b63b6e 100644
}
/*
-@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
+@@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
if (use_xsave())
err = restore_user_xstate(buf);
else
@@ -18540,7 +19513,7 @@ index e32243e..a6e6172 100644
local_irq_disable();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index 579a0b5..ed7bbf9 100644
+index 4ea7678..b3a7084 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1305,7 +1305,11 @@ static void reload_tss(void)
@@ -25133,6 +26106,67 @@ index b095739..8c17bcd 100644
struct trap_info;
void xen_copy_trap_info(struct trap_info *traps);
+diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
+index 525bd3d..ef888b1 100644
+--- a/arch/xtensa/variants/dc232b/include/variant/core.h
++++ b/arch/xtensa/variants/dc232b/include/variant/core.h
+@@ -119,9 +119,9 @@
+ ----------------------------------------------------------------------*/
+
+ #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+ #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
++#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
+
+ #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+ #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
+index 2f33760..835e50a 100644
+--- a/arch/xtensa/variants/fsf/include/variant/core.h
++++ b/arch/xtensa/variants/fsf/include/variant/core.h
+@@ -11,6 +11,7 @@
+ #ifndef _XTENSA_CORE_H
+ #define _XTENSA_CORE_H
+
++#include <linux/const.h>
+
+ /****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+@@ -112,9 +113,9 @@
+ ----------------------------------------------------------------------*/
+
+ #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
+-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
+ #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
++#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
+
+ #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
+ #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
+diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
+index af00795..2bb8105 100644
+--- a/arch/xtensa/variants/s6000/include/variant/core.h
++++ b/arch/xtensa/variants/s6000/include/variant/core.h
+@@ -11,6 +11,7 @@
+ #ifndef _XTENSA_CORE_CONFIGURATION_H
+ #define _XTENSA_CORE_CONFIGURATION_H
+
++#include <linux/const.h>
+
+ /****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+@@ -118,9 +119,9 @@
+ ----------------------------------------------------------------------*/
+
+ #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
+-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
+ #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
++#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
+
+ #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
+ #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 58916af..9cb880b 100644
--- a/block/blk-iopoll.c
@@ -28881,6 +29915,18 @@ index 5a82b6b..9e69c73 100644
if (regcomp
(&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
+diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
+index cb1acff..8861bc5 100644
+--- a/drivers/gpu/drm/radeon/r600_cs.c
++++ b/drivers/gpu/drm/radeon/r600_cs.c
+@@ -1304,6 +1304,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+ h0 = G_038004_TEX_HEIGHT(word1) + 1;
+ d0 = G_038004_TEX_DEPTH(word1);
+ nfaces = 1;
++ array = 0;
+ switch (G_038000_DIM(word0)) {
+ case V_038000_SQ_TEX_DIM_1D:
+ case V_038000_SQ_TEX_DIM_2D:
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8227e76..ce0b195 100644
--- a/drivers/gpu/drm/radeon/radeon.h
@@ -33867,7 +34913,7 @@ index 1cfbf22..be96487 100644
#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
-index 04e74f4..a960176 100644
+index dfee1b3..a454fb6 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
@@ -35231,7 +36277,7 @@ index bafccb3..e3ac78d 100644
/* Ignore return since this msg is optional. */
rndis_filter_send_request(dev, request);
diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
-index 9e8f010..af9efb5 100644
+index 9e8f010..af9efb56 100644
--- a/drivers/staging/iio/buffer_generic.h
+++ b/drivers/staging/iio/buffer_generic.h
@@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
@@ -35620,7 +36666,7 @@ index 6845228..df77141 100644
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index 861628e..659ae80 100644
+index e4ddb93..2fc6e0f 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
@@ -35662,7 +36708,7 @@ index 861628e..659ae80 100644
cmd->t_task_list_num)
atomic_set(&cmd->t_transport_sent, 1);
-@@ -4273,7 +4273,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
+@@ -4296,7 +4296,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
atomic_set(&cmd->transport_lun_stop, 0);
}
if (!atomic_read(&cmd->t_transport_active) ||
@@ -35671,7 +36717,7 @@ index 861628e..659ae80 100644
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return false;
}
-@@ -4522,7 +4522,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+@@ -4545,7 +4545,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
int ret = 0;
@@ -35680,7 +36726,7 @@ index 861628e..659ae80 100644
if (!send_status ||
(cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
return 1;
-@@ -4559,7 +4559,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
+@@ -4582,7 +4582,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
*/
if (cmd->data_direction == DMA_TO_DEVICE) {
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
@@ -39794,7 +40840,7 @@ index 8342ca6..82fd192 100644
kfree(link);
}
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
-index a6395bd..a5b24c4 100644
+index a6395bd..f1e376a 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -16,6 +16,7 @@
@@ -39836,7 +40882,17 @@ index a6395bd..a5b24c4 100644
if (ex.a_data + ex.a_bss > rlim)
return -ENOMEM;
-@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+@@ -259,9 +266,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+ current->mm->free_area_cache = current->mm->mmap_base;
+ current->mm->cached_hole_size = 0;
+
++ retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
++ if (retval < 0) {
++ /* Someone check-me: is this error path enough? */
++ send_sig(SIGKILL, current, 0);
++ return retval;
++ }
++
install_exec_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
@@ -39864,7 +40920,7 @@ index a6395bd..a5b24c4 100644
if (N_MAGIC(ex) == OMAGIC) {
unsigned long text_addr, map_size;
loff_t pos;
-@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+@@ -334,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
down_write(&current->mm->mmap_sem);
error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
@@ -39873,8 +40929,22 @@ index a6395bd..a5b24c4 100644
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
fd_offset + ex.a_text);
up_write(&current->mm->mmap_sem);
+@@ -352,13 +387,6 @@ beyond_if:
+ return retval;
+ }
+
+- retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+- if (retval < 0) {
+- /* Someone check-me: is this error path enough? */
+- send_sig(SIGKILL, current, 0);
+- return retval;
+- }
+-
+ current->mm->start_stack =
+ (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
+ #ifdef __alpha__
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 21ac5ee..31d14e9 100644
+index 21ac5ee..dbf63ee 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -32,6 +32,7 @@
@@ -40009,6 +41079,7 @@ index 21ac5ee..31d14e9 100644
return error;
}
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
+{
+ unsigned long pax_flags = 0UL;
@@ -40154,7 +41225,7 @@ index 21ac5ee..31d14e9 100644
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (!(__supported_pte_mask & _PAGE_NX)) {
++ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
+ pax_flags &= ~MF_PAX_PAGEEXEC;
+ pax_flags |= MF_PAX_SEGMEXEC;
+ }
@@ -40328,7 +41399,6 @@ index 21ac5ee..31d14e9 100644
+
+}
+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
+{
+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
@@ -40565,6 +41635,15 @@ index 21ac5ee..31d14e9 100644
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
+@@ -1421,7 +1886,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
+ for (i = 1; i < view->n; ++i) {
+ const struct user_regset *regset = &view->regsets[i];
+ do_thread_regset_writeback(t->task, regset);
+- if (regset->core_note_type &&
++ if (regset->core_note_type && regset->get &&
+ (!regset->active || regset->active(t->task, regset))) {
+ int ret;
+ size_t size = regset->n * regset->size;
@@ -1862,14 +2327,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
}
@@ -41632,7 +42711,7 @@ index f3a257d..715ac0f 100644
}
EXPORT_SYMBOL_GPL(debugfs_create_dir);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
-index d2039ca..a766407 100644
+index af11098..81e3bbe 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
@@ -41698,7 +42777,7 @@ index 608c1c3..7d040a8 100644
return rc;
}
diff --git a/fs/exec.c b/fs/exec.c
-index 3625464..04855f9 100644
+index 3625464..cdeecdb 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -55,12 +55,28 @@
@@ -41766,11 +42845,11 @@ index 3625464..04855f9 100644
return page;
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ // only allow 1MB for argv+env on suid/sgid binaries
++ // only allow 512KB for argv+env on suid/sgid binaries
+ // to prevent easy ASLR exhaustion
+ if (((bprm->cred->euid != current_euid()) ||
+ (bprm->cred->egid != current_egid())) &&
-+ (size > (1024 * 1024))) {
++ (size > (512 * 1024))) {
+ put_page(page);
+ return NULL;
+ }
@@ -41798,7 +42877,7 @@ index 3625464..04855f9 100644
+
+#ifdef CONFIG_PAX_RANDUSTACK
+ if (randomize_va_space)
-+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
++ bprm->p ^= random32() & ~PAGE_MASK;
+#endif
+
return 0;
@@ -42023,18 +43102,36 @@ index 3625464..04855f9 100644
bprm->unsafe |= LSM_UNSAFE_SHARE;
} else {
res = -EAGAIN;
-@@ -1442,6 +1475,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+@@ -1442,6 +1475,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
EXPORT_SYMBOL(search_binary_handler);
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+static atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
++static DEFINE_PER_CPU(u64, exec_counter);
++static int __init init_exec_counters(void)
++{
++ unsigned int cpu;
++
++ for_each_possible_cpu(cpu) {
++ per_cpu(exec_counter, cpu) = (u64)cpu;
++ }
++
++ return 0;
++}
++early_initcall(init_exec_counters);
++static inline void increment_exec_counter(void)
++{
++ BUILD_BUG_ON(NR_CPUS > (1 << 16));
++ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
++}
++#else
++static inline void increment_exec_counter(void) {}
+#endif
+
/*
* sys_execve() executes a new program.
*/
-@@ -1450,6 +1487,11 @@ static int do_execve_common(const char *filename,
+@@ -1450,6 +1505,11 @@ static int do_execve_common(const char *filename,
struct user_arg_ptr envp,
struct pt_regs *regs)
{
@@ -42046,7 +43143,7 @@ index 3625464..04855f9 100644
struct linux_binprm *bprm;
struct file *file;
struct files_struct *displaced;
-@@ -1457,6 +1499,8 @@ static int do_execve_common(const char *filename,
+@@ -1457,6 +1517,8 @@ static int do_execve_common(const char *filename,
int retval;
const struct cred *cred = current_cred();
@@ -42055,7 +43152,7 @@ index 3625464..04855f9 100644
/*
* We move the actual failure in case of RLIMIT_NPROC excess from
* set*uid() to execve() because too many poorly written programs
-@@ -1497,12 +1541,27 @@ static int do_execve_common(const char *filename,
+@@ -1497,12 +1559,27 @@ static int do_execve_common(const char *filename,
if (IS_ERR(file))
goto out_unmark;
@@ -42083,24 +43180,10 @@ index 3625464..04855f9 100644
retval = bprm_mm_init(bprm);
if (retval)
goto out_file;
-@@ -1532,11 +1591,46 @@ static int do_execve_common(const char *filename,
+@@ -1519,24 +1596,65 @@ static int do_execve_common(const char *filename,
if (retval < 0)
goto out;
-+ if (!gr_tpe_allow(file)) {
-+ retval = -EACCES;
-+ goto out;
-+ }
-+
-+ if (gr_check_crash_exec(file)) {
-+ retval = -EACCES;
-+ goto out;
-+ }
-+
-+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
-+
-+ gr_handle_exec_args(bprm, argv);
-+
+#ifdef CONFIG_GRKERNSEC
+ old_acl = current->acl;
+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
@@ -42108,12 +43191,50 @@ index 3625464..04855f9 100644
+ get_file(file);
+ current->exec_file = file;
+#endif
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ /* limit suid stack to 8MB
++ we saved the old limits above and will restore them if this exec fails
++ */
++ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
++ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
++ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
++#endif
++
++ if (!gr_tpe_allow(file)) {
++ retval = -EACCES;
++ goto out_fail;
++ }
++
++ if (gr_check_crash_exec(file)) {
++ retval = -EACCES;
++ goto out_fail;
++ }
+
+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
+ bprm->unsafe);
+ if (retval < 0)
+ goto out_fail;
+
+ retval = copy_strings_kernel(1, &bprm->filename, bprm);
+ if (retval < 0)
+- goto out;
++ goto out_fail;
+
+ bprm->exec = bprm->p;
+ retval = copy_strings(bprm->envc, envp, bprm);
+ if (retval < 0)
+- goto out;
++ goto out_fail;
+
+ retval = copy_strings(bprm->argc, argv, bprm);
+ if (retval < 0)
+- goto out;
++ goto out_fail;
++
++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
++
++ gr_handle_exec_args(bprm, argv);
+
retval = search_binary_handler(bprm,regs);
if (retval < 0)
- goto out;
@@ -42124,14 +43245,12 @@ index 3625464..04855f9 100644
+#endif
/* execve succeeded */
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
-+#endif
+
++ increment_exec_counter();
current->fs->in_exec = 0;
current->in_execve = 0;
acct_update_integrals(current);
-@@ -1545,6 +1639,14 @@ static int do_execve_common(const char *filename,
+@@ -1545,6 +1663,14 @@ static int do_execve_common(const char *filename,
put_files_struct(displaced);
return retval;
@@ -42146,7 +43265,7 @@ index 3625464..04855f9 100644
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
-@@ -1618,7 +1720,7 @@ static int expand_corename(struct core_name *cn)
+@@ -1618,7 +1744,7 @@ static int expand_corename(struct core_name *cn)
{
char *old_corename = cn->corename;
@@ -42155,7 +43274,7 @@ index 3625464..04855f9 100644
cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
if (!cn->corename) {
-@@ -1715,7 +1817,7 @@ static int format_corename(struct core_name *cn, long signr)
+@@ -1715,7 +1841,7 @@ static int format_corename(struct core_name *cn, long signr)
int pid_in_pattern = 0;
int err = 0;
@@ -42164,7 +43283,7 @@ index 3625464..04855f9 100644
cn->corename = kmalloc(cn->size, GFP_KERNEL);
cn->used = 0;
-@@ -1812,6 +1914,218 @@ out:
+@@ -1812,6 +1938,218 @@ out:
return ispipe;
}
@@ -42355,7 +43474,7 @@ index 3625464..04855f9 100644
+#endif
+}
+
-+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
++__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
+{
+ if (current->signal->curr_ip)
+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
@@ -42383,7 +43502,7 @@ index 3625464..04855f9 100644
static int zap_process(struct task_struct *start, int exit_code)
{
struct task_struct *t;
-@@ -2023,17 +2337,17 @@ static void wait_for_dump_helpers(struct file *file)
+@@ -2023,17 +2361,17 @@ static void wait_for_dump_helpers(struct file *file)
pipe = file->f_path.dentry->d_inode->i_pipe;
pipe_lock(pipe);
@@ -42406,7 +43525,7 @@ index 3625464..04855f9 100644
pipe_unlock(pipe);
}
-@@ -2094,7 +2408,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2094,7 +2432,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
int retval = 0;
int flag = 0;
int ispipe;
@@ -42415,7 +43534,7 @@ index 3625464..04855f9 100644
struct coredump_params cprm = {
.signr = signr,
.regs = regs,
-@@ -2109,6 +2423,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2109,6 +2447,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
audit_core_dumps(signr);
@@ -42425,7 +43544,7 @@ index 3625464..04855f9 100644
binfmt = mm->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
-@@ -2176,7 +2493,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2176,7 +2517,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
}
cprm.limit = RLIM_INFINITY;
@@ -42434,7 +43553,7 @@ index 3625464..04855f9 100644
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
task_tgid_vnr(current), current->comm);
-@@ -2203,6 +2520,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2203,6 +2544,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
} else {
struct inode *inode;
@@ -42443,7 +43562,7 @@ index 3625464..04855f9 100644
if (cprm.limit < binfmt->min_coredump)
goto fail_unlock;
-@@ -2246,7 +2565,7 @@ close_fail:
+@@ -2246,7 +2589,7 @@ close_fail:
filp_close(cprm.file, NULL);
fail_dropcount:
if (ispipe)
@@ -42452,7 +43571,7 @@ index 3625464..04855f9 100644
fail_unlock:
kfree(cn.corename);
fail_corename:
-@@ -2265,7 +2584,7 @@ fail:
+@@ -2265,7 +2608,7 @@ fail:
*/
int dump_write(struct file *file, const void *addr, int nr)
{
@@ -44416,7 +45535,7 @@ index 637694b..f84a121 100644
lock_flocks();
diff --git a/fs/namei.c b/fs/namei.c
-index 5008f01..90328a7 100644
+index 744e942..24ef47f 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
@@ -44491,7 +45610,7 @@ index 5008f01..90328a7 100644
error = 0;
if (s)
error = __vfs_follow_link(nd, s);
-@@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1624,6 +1640,21 @@ static int path_lookupat(int dfd, const char *name,
if (!err)
err = complete_walk(nd);
@@ -44513,7 +45632,7 @@ index 5008f01..90328a7 100644
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!nd->inode->i_op->lookup) {
path_put(&nd->path);
-@@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
+@@ -1651,6 +1682,15 @@ static int do_path_lookup(int dfd, const char *name,
retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
if (likely(!retval)) {
@@ -44529,7 +45648,7 @@ index 5008f01..90328a7 100644
if (unlikely(!audit_dummy_context())) {
if (nd->path.dentry && nd->inode)
audit_inode(name, nd->path.dentry);
-@@ -2046,6 +2086,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+@@ -2048,6 +2088,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
@@ -44543,7 +45662,7 @@ index 5008f01..90328a7 100644
return 0;
}
-@@ -2107,6 +2154,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+@@ -2109,6 +2156,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
error = complete_walk(nd);
if (error)
return ERR_PTR(error);
@@ -44560,7 +45679,7 @@ index 5008f01..90328a7 100644
audit_inode(pathname, nd->path.dentry);
if (open_flag & O_CREAT) {
error = -EISDIR;
-@@ -2117,6 +2174,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+@@ -2119,6 +2176,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
error = complete_walk(nd);
if (error)
return ERR_PTR(error);
@@ -44577,7 +45696,7 @@ index 5008f01..90328a7 100644
audit_inode(pathname, dir);
goto ok;
}
-@@ -2138,6 +2205,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+@@ -2140,6 +2207,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
error = complete_walk(nd);
if (error)
return ERR_PTR(-ECHILD);
@@ -44594,7 +45713,7 @@ index 5008f01..90328a7 100644
error = -ENOTDIR;
if (nd->flags & LOOKUP_DIRECTORY) {
-@@ -2178,6 +2255,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+@@ -2180,6 +2257,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
/* Negative dentry, just create the file */
if (!dentry->d_inode) {
int mode = op->mode;
@@ -44607,7 +45726,7 @@ index 5008f01..90328a7 100644
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
-@@ -2201,6 +2284,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+@@ -2203,6 +2286,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
error = vfs_create(dir->d_inode, dentry, mode, nd);
if (error)
goto exit_mutex_unlock;
@@ -44616,7 +45735,7 @@ index 5008f01..90328a7 100644
mutex_unlock(&dir->d_inode->i_mutex);
dput(nd->path.dentry);
nd->path.dentry = dentry;
-@@ -2210,6 +2295,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+@@ -2212,6 +2297,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
/*
* It already exists.
*/
@@ -44636,7 +45755,7 @@ index 5008f01..90328a7 100644
mutex_unlock(&dir->d_inode->i_mutex);
audit_inode(pathname, path->dentry);
-@@ -2422,6 +2520,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
+@@ -2424,6 +2522,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
*path = nd.path;
return dentry;
eexist:
@@ -44648,7 +45767,7 @@ index 5008f01..90328a7 100644
dput(dentry);
dentry = ERR_PTR(-EEXIST);
fail:
-@@ -2444,6 +2547,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
+@@ -2446,6 +2549,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
}
EXPORT_SYMBOL(user_path_create);
@@ -44669,7 +45788,7 @@ index 5008f01..90328a7 100644
int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
int error = may_create(dir, dentry);
-@@ -2511,6 +2628,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
+@@ -2513,6 +2630,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
error = mnt_want_write(path.mnt);
if (error)
goto out_dput;
@@ -44687,7 +45806,7 @@ index 5008f01..90328a7 100644
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out_drop_write;
-@@ -2528,6 +2656,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
+@@ -2530,6 +2658,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
}
out_drop_write:
mnt_drop_write(path.mnt);
@@ -44697,7 +45816,7 @@ index 5008f01..90328a7 100644
out_dput:
dput(dentry);
mutex_unlock(&path.dentry->d_inode->i_mutex);
-@@ -2577,12 +2708,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
+@@ -2579,12 +2710,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
error = mnt_want_write(path.mnt);
if (error)
goto out_dput;
@@ -44719,7 +45838,7 @@ index 5008f01..90328a7 100644
out_dput:
dput(dentry);
mutex_unlock(&path.dentry->d_inode->i_mutex);
-@@ -2662,6 +2802,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -2664,6 +2804,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
char * name;
struct dentry *dentry;
struct nameidata nd;
@@ -44728,7 +45847,7 @@ index 5008f01..90328a7 100644
error = user_path_parent(dfd, pathname, &nd, &name);
if (error)
-@@ -2690,6 +2832,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -2692,6 +2834,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
error = -ENOENT;
goto exit3;
}
@@ -44744,7 +45863,7 @@ index 5008f01..90328a7 100644
error = mnt_want_write(nd.path.mnt);
if (error)
goto exit3;
-@@ -2697,6 +2848,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -2699,6 +2850,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
if (error)
goto exit4;
error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
@@ -44753,7 +45872,7 @@ index 5008f01..90328a7 100644
exit4:
mnt_drop_write(nd.path.mnt);
exit3:
-@@ -2759,6 +2912,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -2761,6 +2914,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
struct dentry *dentry;
struct nameidata nd;
struct inode *inode = NULL;
@@ -44762,7 +45881,7 @@ index 5008f01..90328a7 100644
error = user_path_parent(dfd, pathname, &nd, &name);
if (error)
-@@ -2781,6 +2936,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -2783,6 +2938,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
if (!inode)
goto slashes;
ihold(inode);
@@ -44779,7 +45898,7 @@ index 5008f01..90328a7 100644
error = mnt_want_write(nd.path.mnt);
if (error)
goto exit2;
-@@ -2788,6 +2953,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -2790,6 +2955,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
if (error)
goto exit3;
error = vfs_unlink(nd.path.dentry->d_inode, dentry);
@@ -44788,7 +45907,7 @@ index 5008f01..90328a7 100644
exit3:
mnt_drop_write(nd.path.mnt);
exit2:
-@@ -2863,10 +3030,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
+@@ -2865,10 +3032,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
error = mnt_want_write(path.mnt);
if (error)
goto out_dput;
@@ -44807,7 +45926,7 @@ index 5008f01..90328a7 100644
out_drop_write:
mnt_drop_write(path.mnt);
out_dput:
-@@ -2938,6 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -2940,6 +3115,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
{
struct dentry *new_dentry;
struct path old_path, new_path;
@@ -44815,7 +45934,7 @@ index 5008f01..90328a7 100644
int how = 0;
int error;
-@@ -2961,7 +3137,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -2963,7 +3139,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
if (error)
return error;
@@ -44824,7 +45943,7 @@ index 5008f01..90328a7 100644
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto out;
-@@ -2972,13 +3148,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -2974,13 +3150,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
error = mnt_want_write(new_path.mnt);
if (error)
goto out_dput;
@@ -44855,7 +45974,7 @@ index 5008f01..90328a7 100644
dput(new_dentry);
mutex_unlock(&new_path.dentry->d_inode->i_mutex);
path_put(&new_path);
-@@ -3206,6 +3399,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
+@@ -3208,6 +3401,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
if (new_dentry == trap)
goto exit5;
@@ -44868,7 +45987,7 @@ index 5008f01..90328a7 100644
error = mnt_want_write(oldnd.path.mnt);
if (error)
goto exit5;
-@@ -3215,6 +3414,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
+@@ -3217,6 +3416,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
goto exit6;
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry);
@@ -44878,7 +45997,7 @@ index 5008f01..90328a7 100644
exit6:
mnt_drop_write(oldnd.path.mnt);
exit5:
-@@ -3240,6 +3442,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+@@ -3242,6 +3444,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
{
@@ -44887,7 +46006,7 @@ index 5008f01..90328a7 100644
int len;
len = PTR_ERR(link);
-@@ -3249,7 +3453,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
+@@ -3251,7 +3455,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
len = strlen(link);
if (len > (unsigned) buflen)
len = buflen;
@@ -45534,7 +46653,7 @@ index 15af622..0e9f4467 100644
help
Various /proc files exist to monitor process memory utilization:
diff --git a/fs/proc/array.c b/fs/proc/array.c
-index 3a1dafd..1456746 100644
+index 3a1dafd..bf1bd84 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -60,6 +60,7 @@
@@ -45642,9 +46761,12 @@ index 3a1dafd..1456746 100644
esp,
eip,
/* The signal information here is obsolete.
-@@ -535,6 +592,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+@@ -533,8 +590,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+ {
unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
- struct mm_struct *mm = get_task_mm(task);
+- struct mm_struct *mm = get_task_mm(task);
++ struct mm_struct *mm;
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+ if (current->exec_id != m->exec_id) {
@@ -45652,7 +46774,7 @@ index 3a1dafd..1456746 100644
+ return 0;
+ }
+#endif
-+
++ mm = get_task_mm(task);
if (mm) {
size = task_statm(mm, &shared, &text, &data, &resident);
mmput(mm);
@@ -47340,10 +48462,10 @@ index 23ce927..e274cc1 100644
kfree(s);
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
-index 0000000..41df561
+index 0000000..4089e05
--- /dev/null
+++ b/grsecurity/Kconfig
-@@ -0,0 +1,1075 @@
+@@ -0,0 +1,1078 @@
+#
+# grecurity configuration
+#
@@ -47618,11 +48740,13 @@ index 0000000..41df561
+ dangerous sources of information, this option causes reads of sensitive
+ /proc/<pid> entries where the file descriptor was opened in a different
+ task than the one performing the read. Such attempts are logged.
-+ Finally, this option limits argv/env strings for suid/sgid binaries
-+ to 1MB to prevent a complete exhaustion of the stack entropy provided
-+ by ASLR.
++ This option also limits argv/env strings for suid/sgid binaries
++ to 512KB to prevent a complete exhaustion of the stack entropy provided
++ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
++ binaries to prevent alternative mmap layouts from being abused.
++
+ If you use PaX it is essential that you say Y here as it closes up
-+ several holes that make full ASLR useless for suid/sgid binaries.
++ several holes that make full ASLR useless locally.
+
+config GRKERNSEC_BRUTE
+ bool "Deter exploit bruteforcing"
@@ -47762,8 +48886,9 @@ index 0000000..41df561
+ Depending upon the option you choose, you can either restrict users to
+ see only the processes they themselves run, or choose a group that can
+ view all processes and files normally restricted to root if you choose
-+ the "restrict to user only" option. NOTE: If you're running identd as
-+ a non-root user, you will have to run it as the group you specify here.
++ the "restrict to user only" option. NOTE: If you're running identd or
++ ntpd as a non-root user, you will have to run it as the group you
++ specify here.
+
+config GRKERNSEC_PROC_USER
+ bool "Restrict /proc to user only"
@@ -48421,10 +49546,10 @@ index 0000000..41df561
+endmenu
diff --git a/grsecurity/Makefile b/grsecurity/Makefile
new file mode 100644
-index 0000000..496e60d
+index 0000000..1b9afa9
--- /dev/null
+++ b/grsecurity/Makefile
-@@ -0,0 +1,40 @@
+@@ -0,0 +1,38 @@
+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
+# during 2001-2009 it has been completely redesigned by Brad Spengler
+# into an RBAC system
@@ -48433,9 +49558,7 @@ index 0000000..496e60d
+# are copyright Brad Spengler - Open Source Security, Inc., and released
+# under the GPL v2 or higher
+
-+ifndef CONFIG_IA64
+KBUILD_CFLAGS += -Werror
-+endif
+
+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
@@ -48467,10 +49590,10 @@ index 0000000..496e60d
+endif
diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
new file mode 100644
-index 0000000..7715893
+index 0000000..2733872
--- /dev/null
+++ b/grsecurity/gracl.c
-@@ -0,0 +1,4164 @@
+@@ -0,0 +1,4163 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
@@ -50246,7 +51369,7 @@ index 0000000..7715893
+
+static struct acl_object_label *
+chk_glob_label(struct acl_object_label *globbed,
-+ struct dentry *dentry, struct vfsmount *mnt, char **path)
++ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
+{
+ struct acl_object_label *tmp;
+
@@ -50279,8 +51402,7 @@ index 0000000..7715893
+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
+ if (retval) {
+ if (checkglob && retval->globbed) {
-+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
-+ (struct vfsmount *)orig_mnt, path);
++ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
+ if (retval2)
+ retval = retval2;
+ }
@@ -57654,6 +58776,19 @@ index 1bfcfe5..e04c5c9 100644
+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
#endif /* __ASM_GENERIC_CACHE_H */
+diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
+index 0d68a1e..b74a761 100644
+--- a/include/asm-generic/emergency-restart.h
++++ b/include/asm-generic/emergency-restart.h
+@@ -1,7 +1,7 @@
+ #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
+ #define _ASM_GENERIC_EMERGENCY_RESTART_H
+
+-static inline void machine_emergency_restart(void)
++static inline __noreturn void machine_emergency_restart(void)
+ {
+ machine_restart(NULL);
+ }
diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
index 1ca3efc..e3dc852 100644
--- a/include/asm-generic/int-l64.h
@@ -58447,10 +59582,10 @@ index 84ccf8e..2e9b14c 100644
};
diff --git a/include/linux/fs.h b/include/linux/fs.h
-index e0bc4ff..d79c2fa 100644
+index 10b2288..09180e4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
-@@ -1608,7 +1608,8 @@ struct file_operations {
+@@ -1609,7 +1609,8 @@ struct file_operations {
int (*setlease)(struct file *, long, struct file_lock **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
@@ -60435,6 +61570,18 @@ index b1f8912..c955bff 100644
/*
* Protect attach/detach and child_list:
+diff --git a/include/linux/personality.h b/include/linux/personality.h
+index 8fc7dd1a..c19d89e 100644
+--- a/include/linux/personality.h
++++ b/include/linux/personality.h
+@@ -44,6 +44,7 @@ enum {
+ #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
+ ADDR_NO_RANDOMIZE | \
+ ADDR_COMPAT_LAYOUT | \
++ ADDR_LIMIT_3GB | \
+ MMAP_PAGE_ZERO)
+
+ /*
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 77257c9..51d473a 100644
--- a/include/linux/pipe_fs_i.h
@@ -60607,6 +61754,30 @@ index e0879a7..a12f962 100644
#include <asm/emergency-restart.h>
#endif
+diff --git a/include/linux/regset.h b/include/linux/regset.h
+index 8abee65..5150fd1 100644
+--- a/include/linux/regset.h
++++ b/include/linux/regset.h
+@@ -335,6 +335,9 @@ static inline int copy_regset_to_user(struct task_struct *target,
+ {
+ const struct user_regset *regset = &view->regsets[setno];
+
++ if (!regset->get)
++ return -EOPNOTSUPP;
++
+ if (!access_ok(VERIFY_WRITE, data, size))
+ return -EIO;
+
+@@ -358,6 +361,9 @@ static inline int copy_regset_from_user(struct task_struct *target,
+ {
+ const struct user_regset *regset = &view->regsets[setno];
+
++ if (!regset->set)
++ return -EOPNOTSUPP;
++
+ if (!access_ok(VERIFY_READ, data, size))
+ return -EIO;
+
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 96d465f..b084e05 100644
--- a/include/linux/reiserfs_fs.h
@@ -60687,7 +61858,7 @@ index 2148b12..519b820 100644
static inline void anon_vma_merge(struct vm_area_struct *vma,
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 1c4f3e9..b4e4851 100644
+index 1c4f3e9..342eb1f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -101,6 +101,7 @@ struct bio_list;
@@ -60871,7 +62042,7 @@ index 1c4f3e9..b4e4851 100644
+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
+extern void pax_report_refcount_overflow(struct pt_regs *regs);
-+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
++extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
@@ -60912,7 +62083,7 @@ index 1c4f3e9..b4e4851 100644
extern void flush_itimer_signals(void);
-extern NORET_TYPE void do_group_exit(int);
-+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
++extern __noreturn void do_group_exit(int);
extern void daemonize(const char *, ...);
extern int allow_signal(int);
@@ -61279,7 +62450,7 @@ index c14fe86..393245e 100644
#define RPCRDMA_VERSION 1
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
-index 703cfa3..0b8ca72ac 100644
+index 703cfa33..0b8ca72ac 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -155,7 +155,11 @@ enum
@@ -61799,10 +62970,10 @@ index 9e5425b..8136ffc 100644
/* Protects from simultaneous access to first_req list */
spinlock_t info_list_lock;
diff --git a/include/net/flow.h b/include/net/flow.h
-index 57f15a7..0de26c6 100644
+index 2a7eefd..3250f3b 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
-@@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
+@@ -218,6 +218,6 @@ extern struct flow_cache_object *flow_cache_lookup(
extern void flow_cache_flush(void);
extern void flow_cache_flush_deferred(void);
@@ -62197,7 +63368,7 @@ index 444cd6b..3327cc5 100644
const struct firmware *dsp_microcode;
const struct firmware *controller_microcode;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
-index a79886c..b483af6 100644
+index 94bbec3..3a8c6b0 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -346,7 +346,7 @@ struct t10_reservation_ops {
@@ -62220,7 +63391,7 @@ index a79886c..b483af6 100644
atomic_t t_transport_active;
atomic_t t_transport_complete;
atomic_t t_transport_queue_active;
-@@ -704,7 +704,7 @@ struct se_device {
+@@ -705,7 +705,7 @@ struct se_device {
/* Active commands on this virtual SE device */
atomic_t simple_cmds;
atomic_t depth_left;
@@ -62725,7 +63896,7 @@ index 5b4293d..f179875 100644
if (u->mq_bytes + mq_bytes < u->mq_bytes ||
u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
diff --git a/ipc/msg.c b/ipc/msg.c
-index 7385de2..a8180e0 100644
+index 7385de2..a8180e08 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
@@ -63555,7 +64726,7 @@ index 58690af..d903d75 100644
/*
diff --git a/kernel/exit.c b/kernel/exit.c
-index e6e01b9..619f837 100644
+index e6e01b9..0a21b0a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -57,6 +57,10 @@
@@ -63634,11 +64805,20 @@ index e6e01b9..619f837 100644
exit_mm(tsk);
if (group_dead)
+@@ -1068,7 +1091,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
+ * Take down every thread in the group. This is called by fatal signals
+ * as well as by sys_exit_group (below).
+ */
+-NORET_TYPE void
++__noreturn void
+ do_group_exit(int exit_code)
+ {
+ struct signal_struct *sig = current->signal;
diff --git a/kernel/fork.c b/kernel/fork.c
-index da4a6a1..0973380 100644
+index 0acf42c0..9e40e2e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -280,7 +280,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+@@ -281,7 +281,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
*stackend = STACK_END_MAGIC; /* for overflow detection */
#ifdef CONFIG_CC_STACKPROTECTOR
@@ -63647,7 +64827,7 @@ index da4a6a1..0973380 100644
#endif
/*
-@@ -304,13 +304,77 @@ out:
+@@ -305,13 +305,77 @@ out:
}
#ifdef CONFIG_MMU
@@ -63727,7 +64907,7 @@ index da4a6a1..0973380 100644
down_write(&oldmm->mmap_sem);
flush_cache_dup_mm(oldmm);
-@@ -322,8 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -323,8 +387,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
mm->locked_vm = 0;
mm->mmap = NULL;
mm->mmap_cache = NULL;
@@ -63738,7 +64918,7 @@ index da4a6a1..0973380 100644
mm->map_count = 0;
cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
-@@ -339,8 +403,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -340,8 +404,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
prev = NULL;
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
@@ -63747,7 +64927,7 @@ index da4a6a1..0973380 100644
if (mpnt->vm_flags & VM_DONTCOPY) {
long pages = vma_pages(mpnt);
mm->total_vm -= pages;
-@@ -348,53 +410,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -349,53 +411,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
-pages);
continue;
}
@@ -63805,7 +64985,7 @@ index da4a6a1..0973380 100644
/*
* Link in the new vma and copy the page table entries.
-@@ -417,6 +437,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -418,6 +438,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (retval)
goto out;
}
@@ -63837,7 +65017,7 @@ index da4a6a1..0973380 100644
/* a new mm has just been created */
arch_dup_mmap(oldmm, mm);
retval = 0;
-@@ -425,14 +470,6 @@ out:
+@@ -426,14 +471,6 @@ out:
flush_tlb_mm(oldmm);
up_write(&oldmm->mmap_sem);
return retval;
@@ -63852,7 +65032,7 @@ index da4a6a1..0973380 100644
}
static inline int mm_alloc_pgd(struct mm_struct *mm)
-@@ -644,6 +681,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
+@@ -645,6 +682,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
}
EXPORT_SYMBOL_GPL(get_task_mm);
@@ -63879,7 +65059,7 @@ index da4a6a1..0973380 100644
/* Please note the differences between mmput and mm_release.
* mmput is called whenever we stop holding onto a mm_struct,
* error success whatever.
-@@ -829,13 +886,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
+@@ -830,13 +887,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
spin_unlock(&fs->lock);
return -EAGAIN;
}
@@ -63895,7 +65075,7 @@ index da4a6a1..0973380 100644
return 0;
}
-@@ -1097,6 +1155,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1100,6 +1158,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
@@ -63905,7 +65085,7 @@ index da4a6a1..0973380 100644
if (atomic_read(&p->real_cred->user->processes) >=
task_rlimit(p, RLIMIT_NPROC)) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
-@@ -1256,6 +1317,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1259,6 +1320,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
@@ -63914,7 +65094,7 @@ index da4a6a1..0973380 100644
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
/*
* Clear TID on mm_release()?
-@@ -1418,6 +1481,8 @@ bad_fork_cleanup_count:
+@@ -1421,6 +1484,8 @@ bad_fork_cleanup_count:
bad_fork_free:
free_task(p);
fork_out:
@@ -63923,7 +65103,7 @@ index da4a6a1..0973380 100644
return ERR_PTR(retval);
}
-@@ -1518,6 +1583,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1521,6 +1586,8 @@ long do_fork(unsigned long clone_flags,
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
@@ -63932,7 +65112,7 @@ index da4a6a1..0973380 100644
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
-@@ -1627,7 +1694,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1630,7 +1697,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
@@ -63941,7 +65121,7 @@ index da4a6a1..0973380 100644
return 0;
*new_fsp = copy_fs_struct(fs);
-@@ -1716,7 +1783,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1719,7 +1786,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
@@ -67701,7 +68881,7 @@ index d9df745..e73c2fe 100644
static inline void *ptr_to_indirect(void *ptr)
{
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
-index 993599e..84dc70e 100644
+index 993599e..f1dbc14 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -16,6 +16,9 @@
@@ -67773,7 +68953,26 @@ index 993599e..84dc70e 100644
case 'B':
return symbol_string(buf, end, ptr, spec, *fmt);
case 'R':
-@@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+@@ -878,9 +894,15 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ case 'U':
+ return uuid_string(buf, end, ptr, spec, fmt);
+ case 'V':
+- return buf + vsnprintf(buf, end > buf ? end - buf : 0,
+- ((struct va_format *)ptr)->fmt,
+- *(((struct va_format *)ptr)->va));
++ {
++ va_list va;
++
++ va_copy(va, *((struct va_format *)ptr)->va);
++ buf += vsnprintf(buf, end > buf ? end - buf : 0,
++ ((struct va_format *)ptr)->fmt, va);
++ va_end(va);
++ return buf;
++ }
+ case 'K':
+ /*
+ * %pK cannot be used in IRQ context because its test
+@@ -1608,11 +1630,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
typeof(type) value; \
if (sizeof(type) == 8) { \
args = PTR_ALIGN(args, sizeof(u32)); \
@@ -67788,7 +68987,7 @@ index 993599e..84dc70e 100644
} \
args += sizeof(type); \
value; \
-@@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+@@ -1675,7 +1697,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
case FORMAT_TYPE_STR: {
const char *str_arg = args;
args += strlen(str_arg) + 1;
@@ -68955,7 +70154,7 @@ index 4f4f53b..9511904 100644
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff --git a/mm/mmap.c b/mm/mmap.c
-index eae90af..44552cf 100644
+index eae90af..c930262 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -46,6 +46,16 @@
@@ -69289,7 +70488,18 @@ index eae90af..44552cf 100644
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
-@@ -1281,6 +1412,19 @@ munmap_back:
+@@ -1266,8 +1397,9 @@ munmap_back:
+ vma->vm_pgoff = pgoff;
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
+
++ error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
++
+ if (file) {
+- error = -EINVAL;
+ if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
+ goto free_vma;
+ if (vm_flags & VM_DENYWRITE) {
+@@ -1281,6 +1413,19 @@ munmap_back:
error = file->f_op->mmap(file, vma);
if (error)
goto unmap_and_free_vma;
@@ -69309,7 +70519,16 @@ index eae90af..44552cf 100644
if (vm_flags & VM_EXECUTABLE)
added_exe_file_vma(mm);
-@@ -1316,6 +1460,11 @@ munmap_back:
+@@ -1293,6 +1438,8 @@ munmap_back:
+ pgoff = vma->vm_pgoff;
+ vm_flags = vma->vm_flags;
+ } else if (vm_flags & VM_SHARED) {
++ if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
++ goto free_vma;
+ error = shmem_zero_setup(vma);
+ if (error)
+ goto free_vma;
+@@ -1316,6 +1463,11 @@ munmap_back:
vma_link(mm, vma, prev, rb_link, rb_parent);
file = vma->vm_file;
@@ -69321,7 +70540,7 @@ index eae90af..44552cf 100644
/* Once vma denies write, undo our temporary denial count */
if (correct_wcount)
atomic_inc(&inode->i_writecount);
-@@ -1324,6 +1473,7 @@ out:
+@@ -1324,6 +1476,7 @@ out:
mm->total_vm += len >> PAGE_SHIFT;
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -69329,7 +70548,7 @@ index eae90af..44552cf 100644
if (vm_flags & VM_LOCKED) {
if (!mlock_vma_pages_range(vma, addr, addr + len))
mm->locked_vm += (len >> PAGE_SHIFT);
-@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
+@@ -1341,6 +1494,12 @@ unmap_and_free_vma:
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
charged = 0;
free_vma:
@@ -69342,7 +70561,7 @@ index eae90af..44552cf 100644
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
-@@ -1348,6 +1504,44 @@ unacct_error:
+@@ -1348,6 +1507,44 @@ unacct_error:
return error;
}
@@ -69387,7 +70606,7 @@ index eae90af..44552cf 100644
/* Get an address range which is currently unmapped.
* For shmat() with addr=0.
*
-@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1374,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (flags & MAP_FIXED)
return addr;
@@ -69418,7 +70637,7 @@ index eae90af..44552cf 100644
}
full_search:
-@@ -1396,34 +1595,40 @@ full_search:
+@@ -1396,34 +1598,40 @@ full_search:
* Start a new search - just in case we missed
* some holes.
*/
@@ -69470,7 +70689,7 @@ index eae90af..44552cf 100644
mm->free_area_cache = addr;
mm->cached_hole_size = ~0UL;
}
-@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1441,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
@@ -69479,7 +70698,7 @@ index eae90af..44552cf 100644
/* requested length too big for entire address space */
if (len > TASK_SIZE)
-@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1450,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (flags & MAP_FIXED)
return addr;
@@ -69502,7 +70721,7 @@ index eae90af..44552cf 100644
}
/* check if free_area_cache is useful for us */
-@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1471,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
/* make sure it can fit in the remaining address space */
if (addr > len) {
vma = find_vma(mm, addr-len);
@@ -69511,7 +70730,7 @@ index eae90af..44552cf 100644
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr-len);
}
-@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1488,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
* return with success:
*/
vma = find_vma(mm, addr);
@@ -69520,7 +70739,7 @@ index eae90af..44552cf 100644
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr);
-@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1497,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
mm->cached_hole_size = vma->vm_start - addr;
/* try just below the current vma->vm_start */
@@ -69531,7 +70750,7 @@ index eae90af..44552cf 100644
bottomup:
/*
-@@ -1507,13 +1717,21 @@ bottomup:
+@@ -1507,13 +1720,21 @@ bottomup:
* can happen with large stack limits and large mmap()
* allocations.
*/
@@ -69555,7 +70774,7 @@ index eae90af..44552cf 100644
mm->cached_hole_size = ~0UL;
return addr;
-@@ -1522,6 +1740,12 @@ bottomup:
+@@ -1522,6 +1743,12 @@ bottomup:
void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
{
@@ -69568,7 +70787,7 @@ index eae90af..44552cf 100644
/*
* Is this a new hole at the highest possible address?
*/
-@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+@@ -1529,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
mm->free_area_cache = addr;
/* dont allow allocations above current base */
@@ -69580,14 +70799,13 @@ index eae90af..44552cf 100644
}
unsigned long
-@@ -1603,40 +1829,42 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+@@ -1603,40 +1832,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
EXPORT_SYMBOL(find_vma);
-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
+/*
+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
-+ * Note: pprev is set to NULL when return value is NULL.
+ */
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
@@ -69597,22 +70815,13 @@ index eae90af..44552cf 100644
- struct rb_node *rb_node;
- if (!mm)
- goto out;
-+ struct vm_area_struct *vma;
-
+-
- /* Guard against addr being lower than the first VMA */
- vma = mm->mmap;
-+ vma = find_vma(mm, addr);
-+ *pprev = vma ? vma->vm_prev : NULL;
-+ return vma;
-+}
-
+-
- /* Go through the RB tree quickly. */
- rb_node = mm->mm_rb.rb_node;
-+#ifdef CONFIG_PAX_SEGMEXEC
-+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
-+{
-+ struct vm_area_struct *vma_m;
-
+-
- while (rb_node) {
- struct vm_area_struct *vma_tmp;
- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
@@ -69623,17 +70832,35 @@ index eae90af..44552cf 100644
- prev = vma_tmp;
- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
- break;
-- rb_node = rb_node->rb_right;
-- }
-+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
-+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
-+ BUG_ON(vma->vm_mirror);
-+ return NULL;
++ struct vm_area_struct *vma;
++
++ vma = find_vma(mm, addr);
++ if (vma) {
++ *pprev = vma->vm_prev;
++ } else {
++ struct rb_node *rb_node = mm->mm_rb.rb_node;
++ *pprev = NULL;
++ while (rb_node) {
++ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+ rb_node = rb_node->rb_right;
+ }
}
--
++ return vma;
++}
++
++#ifdef CONFIG_PAX_SEGMEXEC
++struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
++{
++ struct vm_area_struct *vma_m;
+
-out:
- *pprev = prev;
- return prev ? prev->vm_next : vma;
++ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
++ BUG_ON(vma->vm_mirror);
++ return NULL;
++ }
+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
+ vma_m = vma->vm_mirror;
+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
@@ -69648,7 +70875,7 @@ index eae90af..44552cf 100644
/*
* Verify that the stack growth is acceptable and
-@@ -1654,6 +1882,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1654,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
return -ENOMEM;
/* Stack limit test */
@@ -69656,7 +70883,7 @@ index eae90af..44552cf 100644
if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
-@@ -1664,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1664,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
locked = mm->locked_vm + grow;
limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
limit >>= PAGE_SHIFT;
@@ -69664,7 +70891,7 @@ index eae90af..44552cf 100644
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
-@@ -1694,37 +1924,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1694,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
@@ -69722,7 +70949,7 @@ index eae90af..44552cf 100644
unsigned long size, grow;
size = address - vma->vm_start;
-@@ -1739,6 +1980,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -1739,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
}
}
}
@@ -69731,7 +70958,7 @@ index eae90af..44552cf 100644
vma_unlock_anon_vma(vma);
khugepaged_enter_vma_merge(vma);
return error;
-@@ -1752,6 +1995,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1752,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
int error;
@@ -69740,7 +70967,7 @@ index eae90af..44552cf 100644
/*
* We must make sure the anon_vma is allocated
-@@ -1765,6 +2010,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1765,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
if (error)
return error;
@@ -69756,7 +70983,7 @@ index eae90af..44552cf 100644
vma_lock_anon_vma(vma);
/*
-@@ -1774,9 +2028,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1774,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
*/
/* Somebody else might have raced and expanded it already */
@@ -69775,7 +71002,7 @@ index eae90af..44552cf 100644
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
-@@ -1786,11 +2048,22 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1786,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
if (!error) {
vma->vm_start = address;
vma->vm_pgoff -= grow;
@@ -69798,7 +71025,7 @@ index eae90af..44552cf 100644
khugepaged_enter_vma_merge(vma);
return error;
}
-@@ -1860,6 +2133,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -1860,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
do {
long nrpages = vma_pages(vma);
@@ -69812,7 +71039,7 @@ index eae90af..44552cf 100644
mm->total_vm -= nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
vma = remove_vma(vma);
-@@ -1905,6 +2185,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -1905,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
@@ -69829,7 +71056,7 @@ index eae90af..44552cf 100644
rb_erase(&vma->vm_rb, &mm->mm_rb);
mm->map_count--;
tail_vma = vma;
-@@ -1933,14 +2223,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1933,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
struct vm_area_struct *new;
int err = -ENOMEM;
@@ -69863,7 +71090,7 @@ index eae90af..44552cf 100644
/* most fields are the same, copy all, and then fixup */
*new = *vma;
-@@ -1953,6 +2262,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1953,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -69886,7 +71113,7 @@ index eae90af..44552cf 100644
pol = mpol_dup(vma_policy(vma));
if (IS_ERR(pol)) {
err = PTR_ERR(pol);
-@@ -1978,6 +2303,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1978,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -69929,7 +71156,7 @@ index eae90af..44552cf 100644
/* Success. */
if (!err)
return 0;
-@@ -1990,10 +2351,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1990,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
removed_exe_file_vma(mm);
fput(new->vm_file);
}
@@ -69949,7 +71176,7 @@ index eae90af..44552cf 100644
kmem_cache_free(vm_area_cachep, new);
out_err:
return err;
-@@ -2006,6 +2375,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2006,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
@@ -69965,7 +71192,7 @@ index eae90af..44552cf 100644
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
-@@ -2017,11 +2395,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2017,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
@@ -69996,7 +71223,7 @@ index eae90af..44552cf 100644
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
-@@ -2096,6 +2493,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2096,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Fix up all other VM information */
remove_vma_list(mm, vma);
@@ -70005,7 +71232,7 @@ index eae90af..44552cf 100644
return 0;
}
-@@ -2108,22 +2507,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2108,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
profile_munmap(addr);
@@ -70034,7 +71261,7 @@ index eae90af..44552cf 100644
/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
-@@ -2137,6 +2532,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2137,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
@@ -70042,7 +71269,7 @@ index eae90af..44552cf 100644
len = PAGE_ALIGN(len);
if (!len)
-@@ -2148,16 +2544,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2148,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -70074,7 +71301,7 @@ index eae90af..44552cf 100644
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
-@@ -2174,22 +2584,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2174,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
/*
* Clear old maps. this also does some error checking for us
*/
@@ -70101,7 +71328,7 @@ index eae90af..44552cf 100644
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
-@@ -2203,7 +2613,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2203,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
@@ -70110,7 +71337,7 @@ index eae90af..44552cf 100644
return -ENOMEM;
}
-@@ -2217,11 +2627,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2217,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
perf_event_mmap(vma);
@@ -70125,7 +71352,7 @@ index eae90af..44552cf 100644
return addr;
}
-@@ -2268,8 +2679,10 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2268,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
* Walk the list again, actually closing and freeing it,
* with preemption enabled, without holding any MM locks.
*/
@@ -70137,7 +71364,7 @@ index eae90af..44552cf 100644
BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
}
-@@ -2283,6 +2696,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+@@ -2283,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
struct vm_area_struct * __vma, * prev;
struct rb_node ** rb_link, * rb_parent;
@@ -70151,7 +71378,7 @@ index eae90af..44552cf 100644
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
-@@ -2305,7 +2725,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+@@ -2305,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
if ((vma->vm_flags & VM_ACCOUNT) &&
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
@@ -70174,7 +71401,7 @@ index eae90af..44552cf 100644
return 0;
}
-@@ -2323,6 +2758,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2323,6 +2769,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
struct rb_node **rb_link, *rb_parent;
struct mempolicy *pol;
@@ -70183,7 +71410,7 @@ index eae90af..44552cf 100644
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
-@@ -2373,6 +2810,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2373,6 +2821,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return NULL;
}
@@ -70223,7 +71450,7 @@ index eae90af..44552cf 100644
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
-@@ -2383,7 +2853,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2383,7 +2864,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
unsigned long lim;
lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
@@ -70232,7 +71459,7 @@ index eae90af..44552cf 100644
if (cur + npages > lim)
return 0;
return 1;
-@@ -2454,6 +2924,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2454,6 +2935,22 @@ int install_special_mapping(struct mm_struct *mm,
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -70631,7 +71858,7 @@ index 7fa41b4..6087460 100644
return count;
}
diff --git a/mm/nommu.c b/mm/nommu.c
-index b982290..7d73f53 100644
+index ee7e57e..cae4e40 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
@@ -70642,7 +71869,7 @@ index b982290..7d73f53 100644
atomic_long_t mmap_pages_allocated;
-@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+@@ -829,15 +828,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
EXPORT_SYMBOL(find_vma);
/*
@@ -70658,7 +71885,7 @@ index b982290..7d73f53 100644
* expand a stack to a given address
* - not supported under NOMMU conditions
*/
-@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -1557,6 +1547,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
/* most fields are the same, copy all, and then fixup */
*new = *vma;
@@ -72734,7 +73961,7 @@ index 68bbf9f..5ef0d12 100644
return err;
diff --git a/net/core/dev.c b/net/core/dev.c
-index 5a13edf..a6f2bd2 100644
+index c56cacf..b28e35f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
@@ -72806,7 +74033,7 @@ index 5a13edf..a6f2bd2 100644
kfree_skb(skb);
/* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
-@@ -3891,7 +3895,7 @@ void netif_napi_del(struct napi_struct *napi)
+@@ -3897,7 +3901,7 @@ void netif_napi_del(struct napi_struct *napi)
}
EXPORT_SYMBOL(netif_napi_del);
@@ -72815,7 +74042,7 @@ index 5a13edf..a6f2bd2 100644
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
-@@ -5949,7 +5953,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+@@ -5955,7 +5959,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -73395,7 +74622,7 @@ index 94cdbc5..0cb0063 100644
ts = peer->tcp_ts;
tsage = get_seconds() - peer->tcp_ts_stamp;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index c89e354..8bd55c8 100644
+index eb90aa8..22bf114 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
@@ -73408,7 +74635,7 @@ index c89e354..8bd55c8 100644
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
-@@ -1627,6 +1630,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
+@@ -1632,6 +1635,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
reset:
@@ -73418,7 +74645,7 @@ index c89e354..8bd55c8 100644
tcp_v4_send_reset(rsk, skb);
discard:
kfree_skb(skb);
-@@ -1689,12 +1695,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
+@@ -1694,12 +1700,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
TCP_SKB_CB(skb)->sacked = 0;
sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -73441,7 +74668,7 @@ index c89e354..8bd55c8 100644
if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
-@@ -1744,6 +1757,10 @@ no_tcp_socket:
+@@ -1749,6 +1762,10 @@ no_tcp_socket:
bad_packet:
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
} else {
@@ -73452,7 +74679,7 @@ index c89e354..8bd55c8 100644
tcp_v4_send_reset(NULL, skb);
}
-@@ -2404,7 +2421,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
+@@ -2409,7 +2426,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
0, /* non standard timer */
0, /* open_requests have no inode */
atomic_read(&sk->sk_refcnt),
@@ -73464,7 +74691,7 @@ index c89e354..8bd55c8 100644
len);
}
-@@ -2454,7 +2475,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
+@@ -2459,7 +2480,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
sock_i_uid(sk),
icsk->icsk_probes_out,
sock_i_ino(sk),
@@ -73478,7 +74705,7 @@ index c89e354..8bd55c8 100644
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
-@@ -2482,7 +2508,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
+@@ -2487,7 +2513,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
@@ -74226,7 +75453,7 @@ index 30d7355..e260095 100644
napi_disable(&local->napi);
ieee80211_clear_tx_pending(local);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
-index a7536fd..4039cc0 100644
+index 7d9b21d..0687004 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
@@ -74373,7 +75600,7 @@ index 29fa5ba..8debc79 100644
if (!todrop_rate[i]) return 0;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
-index 093cc32..9209ae1 100644
+index 6dc7d7d..e45913a 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
@@ -75937,7 +77164,7 @@ index 9049a5c..cfa6f5c 100644
}
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
-index d2b366c..51ff91e 100644
+index d2b366c..51ff91ebc 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -109,7 +109,7 @@ endif
@@ -76252,7 +77479,7 @@ index 5c11312..72742b5 100644
write_hex_cnt = 0;
for (i = 0; i < logo_clutsize; i++) {
diff --git a/security/Kconfig b/security/Kconfig
-index 51bd5a0..eeabc9f 100644
+index 51bd5a0..3a4ebd0 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -4,6 +4,627 @@
@@ -76830,7 +78057,7 @@ index 51bd5a0..eeabc9f 100644
+
+config PAX_REFCOUNT
+ bool "Prevent various kernel object reference counter overflows"
-+ depends on GRKERNSEC && (X86 || SPARC64)
++ depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
+ help
+ By saying Y here the kernel will detect and prevent overflowing
+ various (but not all) kinds of object reference counters. Such
@@ -77813,23 +79040,24 @@ index a39edcc..1014050 100644
};
diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
new file mode 100644
-index 0000000..29b6b75
+index 0000000..894c8bf
--- /dev/null
+++ b/tools/gcc/Makefile
-@@ -0,0 +1,21 @@
+@@ -0,0 +1,23 @@
+#CC := gcc
+#PLUGIN_SOURCE_FILES := pax_plugin.c
+#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
+GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
+#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
+
-+HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99
++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
+
+hostlibs-y := constify_plugin.so
+hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
+hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
+hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
+hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
++hostlibs-y += colorize_plugin.so
+
+always := $(hostlibs-y)
+
@@ -77838,6 +79066,7 @@ index 0000000..29b6b75
+kallocstat_plugin-objs := kallocstat_plugin.o
+kernexec_plugin-objs := kernexec_plugin.o
+checker_plugin-objs := checker_plugin.o
++colorize_plugin-objs := colorize_plugin.o
diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
new file mode 100644
index 0000000..d41b5af
@@ -78015,6 +79244,159 @@ index 0000000..d41b5af
+
+ return 0;
+}
+diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
+new file mode 100644
+index 0000000..ee950d0
+--- /dev/null
++++ b/tools/gcc/colorize_plugin.c
+@@ -0,0 +1,147 @@
++/*
++ * Copyright 2012 by PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to colorize diagnostic output
++ *
++ */
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info colorize_plugin_info = {
++ .version = "201203092200",
++};
++
++#define GREEN "\033[32m\033[2m"
++#define LIGHTGREEN "\033[32m\033[1m"
++#define YELLOW "\033[33m\033[2m"
++#define LIGHTYELLOW "\033[33m\033[1m"
++#define RED "\033[31m\033[2m"
++#define LIGHTRED "\033[31m\033[1m"
++#define BLUE "\033[34m\033[2m"
++#define LIGHTBLUE "\033[34m\033[1m"
++#define BRIGHT "\033[m\033[1m"
++#define NORMAL "\033[m"
++
++static diagnostic_starter_fn old_starter;
++static diagnostic_finalizer_fn old_finalizer;
++
++static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
++{
++ const char *color;
++ char *newprefix;
++
++ switch (diagnostic->kind) {
++ case DK_NOTE:
++ color = LIGHTBLUE;
++ break;
++
++ case DK_PEDWARN:
++ case DK_WARNING:
++ color = LIGHTYELLOW;
++ break;
++
++ case DK_ERROR:
++ case DK_FATAL:
++ case DK_ICE:
++ case DK_PERMERROR:
++ case DK_SORRY:
++ color = LIGHTRED;
++ break;
++
++ default:
++ color = NORMAL;
++ }
++
++ old_starter(context, diagnostic);
++ if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
++ return;
++ pp_destroy_prefix(context->printer);
++ pp_set_prefix(context->printer, newprefix);
++}
++
++static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
++{
++ old_finalizer(context, diagnostic);
++}
++
++static void colorize_arm(void)
++{
++ old_starter = diagnostic_starter(global_dc);
++ old_finalizer = diagnostic_finalizer(global_dc);
++
++ diagnostic_starter(global_dc) = start_colorize;
++ diagnostic_finalizer(global_dc) = finalize_colorize;
++}
++
++static unsigned int execute_colorize_rearm(void)
++{
++ if (diagnostic_starter(global_dc) == start_colorize)
++ return 0;
++
++ colorize_arm();
++ return 0;
++}
++
++struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
++ .pass = {
++ .type = SIMPLE_IPA_PASS,
++ .name = "colorize_rearm",
++ .gate = NULL,
++ .execute = execute_colorize_rearm,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = 0
++ }
++};
++
++static void colorize_start_unit(void *gcc_data, void *user_data)
++{
++ colorize_arm();
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ struct register_pass_info colorize_rearm_pass_info = {
++ .pass = &pass_ipa_colorize_rearm.pass,
++ .reference_pass_name = "*free_lang_data",
++ .ref_pass_instance_number = 0,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
++ register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
++ return 0;
++}
diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
new file mode 100644
index 0000000..704a564
@@ -78932,10 +80314,10 @@ index 0000000..008f159
+}
diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
new file mode 100644
-index 0000000..8b61031
+index 0000000..ea79948
--- /dev/null
+++ b/tools/gcc/stackleak_plugin.c
-@@ -0,0 +1,295 @@
+@@ -0,0 +1,326 @@
+/*
+ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
@@ -78982,10 +80364,12 @@ index 0000000..8b61031
+static int track_frame_size = -1;
+static const char track_function[] = "pax_track_stack";
+static const char check_function[] = "pax_check_alloca";
++static tree pax_check_alloca_decl;
++static tree pax_track_stack_decl;
+static bool init_locals;
+
+static struct plugin_info stackleak_plugin_info = {
-+ .version = "201111150100",
++ .version = "201203021600",
+ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
+// "initialize-locals\t\tforcibly initialize all stack frames\n"
+};
@@ -79038,27 +80422,20 @@ index 0000000..8b61031
+static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
+{
+ gimple check_alloca;
-+ tree fndecl, fntype, alloca_size;
++ tree alloca_size;
+
+ // insert call to void pax_check_alloca(unsigned long size)
-+ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
-+ fndecl = build_fn_decl(check_function, fntype);
-+ DECL_ASSEMBLER_NAME(fndecl); // for LTO
+ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
-+ check_alloca = gimple_build_call(fndecl, 1, alloca_size);
++ check_alloca = gimple_build_call(pax_check_alloca_decl, 1, alloca_size);
+ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
+}
+
+static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
+{
+ gimple track_stack;
-+ tree fndecl, fntype;
+
+ // insert call to void pax_track_stack(void)
-+ fntype = build_function_type_list(void_type_node, NULL_TREE);
-+ fndecl = build_fn_decl(track_function, fntype);
-+ DECL_ASSEMBLER_NAME(fndecl); // for LTO
-+ track_stack = gimple_build_call(fndecl, 0);
++ track_stack = gimple_build_call(pax_track_stack_decl, 0);
+ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
+}
+
@@ -79095,7 +80472,7 @@ index 0000000..8b61031
+static unsigned int execute_stackleak_tree_instrument(void)
+{
+ basic_block bb, entry_bb;
-+ bool prologue_instrumented = false;
++ bool prologue_instrumented = false, is_leaf = true;
+
+ entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
+
@@ -79104,8 +80481,15 @@ index 0000000..8b61031
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ gimple stmt;
++
++ stmt = gsi_stmt(gsi);
++
++ if (is_gimple_call(stmt))
++ is_leaf = false;
++
+ // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
-+ if (!is_alloca(gsi_stmt(gsi)))
++ if (!is_alloca(stmt))
+ continue;
+
+ // 2. insert stack overflow check before each __builtin_alloca call
@@ -79118,6 +80502,13 @@ index 0000000..8b61031
+ }
+ }
+
++ // special case for some bad linux code: taking the address of static inline functions will materialize them
++ // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
++ // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
++ // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
++ if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
++ return 0;
++
+ // 4. insert track call at the beginning
+ if (!prologue_instrumented) {
+ gimple_stmt_iterator gsi;
@@ -79177,6 +80568,27 @@ index 0000000..8b61031
+ return 0;
+}
+
++static void stackleak_start_unit(void *gcc_data, void *user_data)
++{
++ tree fntype;
++
++ // declare void pax_check_alloca(unsigned long size)
++ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
++ pax_check_alloca_decl = build_fn_decl(check_function, fntype);
++ DECL_ASSEMBLER_NAME(pax_check_alloca_decl); // for LTO
++ TREE_PUBLIC(pax_check_alloca_decl) = 1;
++ DECL_EXTERNAL(pax_check_alloca_decl) = 1;
++ DECL_ARTIFICIAL(pax_check_alloca_decl) = 1;
++
++ // declare void pax_track_stack(void)
++ fntype = build_function_type_list(void_type_node, NULL_TREE);
++ pax_track_stack_decl = build_fn_decl(track_function, fntype);
++ DECL_ASSEMBLER_NAME(pax_track_stack_decl); // for LTO
++ TREE_PUBLIC(pax_track_stack_decl) = 1;
++ DECL_EXTERNAL(pax_track_stack_decl) = 1;
++ DECL_ARTIFICIAL(pax_track_stack_decl) = 1;
++}
++
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+ const char * const plugin_name = plugin_info->base_name;
@@ -79188,7 +80600,7 @@ index 0000000..8b61031
+// .reference_pass_name = "tree_profile",
+ .reference_pass_name = "optimized",
+ .ref_pass_instance_number = 0,
-+ .pos_op = PASS_POS_INSERT_AFTER
++ .pos_op = PASS_POS_INSERT_BEFORE
+ };
+ struct register_pass_info stackleak_final_pass_info = {
+ .pass = &stackleak_final_rtl_opt_pass.pass,
@@ -79226,6 +80638,7 @@ index 0000000..8b61031
+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+ }
+
++ register_callback(plugin_name, PLUGIN_START_UNIT, &stackleak_start_unit, NULL);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
+
diff --git a/main/linux-grsec/inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch b/main/linux-grsec/inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch
new file mode 100644
index 000000000..0f26cf40a
--- /dev/null
+++ b/main/linux-grsec/inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch
@@ -0,0 +1,174 @@
+From: Steffen Klassert <steffen.klassert@secunet.com>
+Date: Tue, 6 Mar 2012 21:20:26 +0000 (+0000)
+Subject: inetpeer: Invalidate the inetpeer tree along with the routing cache
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdavem%2Fnet.git;a=commitdiff_plain;h=5faa5df1fa2024bd750089ff21dcc4191798263d
+
+inetpeer: Invalidate the inetpeer tree along with the routing cache
+
+We initialize the routing metrics with the values cached on the
+inetpeer in rt_init_metrics(). So if we have the metrics cached on the
+inetpeer, we ignore the user configured fib_metrics.
+
+To fix this issue, we replace the old tree with a fresh initialized
+inet_peer_base. The old tree is removed later with a delayed work queue.
+
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index 06b795d..ff04a33 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -41,6 +41,7 @@ struct inet_peer {
+ u32 pmtu_orig;
+ u32 pmtu_learned;
+ struct inetpeer_addr_base redirect_learned;
++ struct list_head gc_list;
+ /*
+ * Once inet_peer is queued for deletion (refcnt == -1), following fields
+ * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
+@@ -96,6 +97,8 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr,
+ extern void inet_putpeer(struct inet_peer *p);
+ extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+
++extern void inetpeer_invalidate_tree(int family);
++
+ /*
+ * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
+ * tcp_ts_stamp if no refcount is taken on inet_peer
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index bf4a9c4..deea2e9 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -17,6 +17,7 @@
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/net.h>
++#include <linux/workqueue.h>
+ #include <net/ip.h>
+ #include <net/inetpeer.h>
+ #include <net/secure_seq.h>
+@@ -66,6 +67,11 @@
+
+ static struct kmem_cache *peer_cachep __read_mostly;
+
++static LIST_HEAD(gc_list);
++static const int gc_delay = 60 * HZ;
++static struct delayed_work gc_work;
++static DEFINE_SPINLOCK(gc_lock);
++
+ #define node_height(x) x->avl_height
+
+ #define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+@@ -102,6 +108,50 @@ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries m
+ int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
+ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */
+
++static void inetpeer_gc_worker(struct work_struct *work)
++{
++ struct inet_peer *p, *n;
++ LIST_HEAD(list);
++
++ spin_lock_bh(&gc_lock);
++ list_replace_init(&gc_list, &list);
++ spin_unlock_bh(&gc_lock);
++
++ if (list_empty(&list))
++ return;
++
++ list_for_each_entry_safe(p, n, &list, gc_list) {
++
++ if(need_resched())
++ cond_resched();
++
++ if (p->avl_left != peer_avl_empty) {
++ list_add_tail(&p->avl_left->gc_list, &list);
++ p->avl_left = peer_avl_empty;
++ }
++
++ if (p->avl_right != peer_avl_empty) {
++ list_add_tail(&p->avl_right->gc_list, &list);
++ p->avl_right = peer_avl_empty;
++ }
++
++ n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
++
++ if (!atomic_read(&p->refcnt)) {
++ list_del(&p->gc_list);
++ kmem_cache_free(peer_cachep, p);
++ }
++ }
++
++ if (list_empty(&list))
++ return;
++
++ spin_lock_bh(&gc_lock);
++ list_splice(&list, &gc_list);
++ spin_unlock_bh(&gc_lock);
++
++ schedule_delayed_work(&gc_work, gc_delay);
++}
+
+ /* Called from ip_output.c:ip_init */
+ void __init inet_initpeers(void)
+@@ -126,6 +176,7 @@ void __init inet_initpeers(void)
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
+ NULL);
+
++ INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
+ }
+
+ static int addr_compare(const struct inetpeer_addr *a,
+@@ -449,7 +500,7 @@ relookup:
+ p->pmtu_orig = 0;
+ p->redirect_genid = 0;
+ memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
+-
++ INIT_LIST_HEAD(&p->gc_list);
+
+ /* Link the node. */
+ link_to_pool(p, base);
+@@ -509,3 +560,30 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
+ return rc;
+ }
+ EXPORT_SYMBOL(inet_peer_xrlim_allow);
++
++void inetpeer_invalidate_tree(int family)
++{
++ struct inet_peer *old, *new, *prev;
++ struct inet_peer_base *base = family_to_base(family);
++
++ write_seqlock_bh(&base->lock);
++
++ old = base->root;
++ if (old == peer_avl_empty_rcu)
++ goto out;
++
++ new = peer_avl_empty_rcu;
++
++ prev = cmpxchg(&base->root, old, new);
++ if (prev == old) {
++ base->total = 0;
++ spin_lock(&gc_lock);
++ list_add_tail(&prev->gc_list, &gc_list);
++ spin_unlock(&gc_lock);
++ schedule_delayed_work(&gc_work, gc_delay);
++ }
++
++out:
++ write_sequnlock_bh(&base->lock);
++}
++EXPORT_SYMBOL(inetpeer_invalidate_tree);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index bcacf54..23ce0c1 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -938,6 +938,7 @@ static void rt_cache_invalidate(struct net *net)
+ get_random_bytes(&shuffle, sizeof(shuffle));
+ atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+ redirect_genid++;
++ inetpeer_invalidate_tree(AF_INET);
+ }
+
+ /*
diff --git a/main/linux-grsec/linux-3.0.x-regression-with-ipv4-routes-having-mtu.patch b/main/linux-grsec/linux-3.0.x-regression-with-ipv4-routes-having-mtu.patch
deleted file mode 100644
index 7ca414782..000000000
--- a/main/linux-grsec/linux-3.0.x-regression-with-ipv4-routes-having-mtu.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index f30112f..26a6249 100644
---- a/net/ipv4/route.c
-+++ b/net/ipv4/route.c
-@@ -1841,6 +1841,22 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
- return mtu;
- }
-
-+static void __rt_init_metrics(struct rtable *rt, struct fib_info *fi,
-+ struct inet_peer *peer)
-+{
-+ if (peer && fi->fib_metrics == (u32 *) dst_default_metrics) {
-+ dst_init_metrics(&rt->dst, peer->metrics, false);
-+ return;
-+ }
-+
-+ if (fi->fib_metrics != (u32 *) dst_default_metrics) {
-+ rt->fi = fi;
-+ atomic_inc(&fi->fib_clntref);
-+ }
-+
-+ dst_init_metrics(&rt->dst, fi->fib_metrics, true);
-+}
-+
- static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
- struct fib_info *fi)
- {
-@@ -1859,7 +1875,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
- if (inet_metrics_new(peer))
- memcpy(peer->metrics, fi->fib_metrics,
- sizeof(u32) * RTAX_MAX);
-- dst_init_metrics(&rt->dst, peer->metrics, false);
-+
-+ __rt_init_metrics(rt, fi, peer);
-
- check_peer_pmtu(&rt->dst, peer);
- if (peer->redirect_genid != redirect_genid)
-@@ -1869,13 +1886,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
- rt->rt_gateway = peer->redirect_learned.a4;
- rt->rt_flags |= RTCF_REDIRECTED;
- }
-- } else {
-- if (fi->fib_metrics != (u32 *) dst_default_metrics) {
-- rt->fi = fi;
-- atomic_inc(&fi->fib_clntref);
-- }
-- dst_init_metrics(&rt->dst, fi->fib_metrics, true);
-- }
-+ } else
-+ __rt_init_metrics(rt, fi, NULL);
- }
-
- static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
diff --git a/main/linux-grsec/route-remove-redirect-genid.patch b/main/linux-grsec/route-remove-redirect-genid.patch
new file mode 100644
index 000000000..7c1d28b46
--- /dev/null
+++ b/main/linux-grsec/route-remove-redirect-genid.patch
@@ -0,0 +1,81 @@
+One hunk was removed from this patch.
+
+
+From: Steffen Klassert <steffen.klassert@secunet.com>
+Date: Tue, 6 Mar 2012 21:21:10 +0000 (+0000)
+Subject: route: Remove redirect_genid
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdavem%2Fnet.git;a=commitdiff_plain;h=ac3f48de09d8f4b73397047e413fadff7f65cfa7
+
+route: Remove redirect_genid
+
+As we invalidate the inetpeer tree along with the routing cache now,
+we don't need a genid to reset the redirect handling when the routing
+cache is flushed.
+
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index ff04a33..b94765e 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -35,7 +35,6 @@ struct inet_peer {
+
+ u32 metrics[RTAX_MAX];
+ u32 rate_tokens; /* rate limiting for ICMP */
+- int redirect_genid;
+ unsigned long rate_last;
+ unsigned long pmtu_expires;
+ u32 pmtu_orig;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 23ce0c1..0197747 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -132,7 +132,6 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
+ static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
+ static int ip_rt_min_advmss __read_mostly = 256;
+ static int rt_chain_length_max __read_mostly = 20;
+-static int redirect_genid;
+
+ static struct delayed_work expires_work;
+ static unsigned long expires_ljiffies;
+@@ -937,7 +936,6 @@ static void rt_cache_invalidate(struct net *net)
+
+ get_random_bytes(&shuffle, sizeof(shuffle));
+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
+- redirect_genid++;
+ inetpeer_invalidate_tree(AF_INET);
+ }
+
+@@ -1486,10 +1484,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
+
+ peer = rt->peer;
+ if (peer) {
+- if (peer->redirect_learned.a4 != new_gw ||
+- peer->redirect_genid != redirect_genid) {
++ if (peer->redirect_learned.a4 != new_gw) {
+ peer->redirect_learned.a4 = new_gw;
+- peer->redirect_genid = redirect_genid;
+ atomic_inc(&__rt_peer_genid);
+ }
+ check_peer_redir(&rt->dst, peer);
+@@ -1794,8 +1790,6 @@ static void ipv4_validate_peer(struct rtable *rt)
+ if (peer) {
+ check_peer_pmtu(&rt->dst, peer);
+
+- if (peer->redirect_genid != redirect_genid)
+- peer->redirect_learned.a4 = 0;
+ if (peer->redirect_learned.a4 &&
+ peer->redirect_learned.a4 != rt->rt_gateway)
+ check_peer_redir(&rt->dst, peer);
+@@ -1959,8 +1953,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
+ dst_init_metrics(&rt->dst, peer->metrics, false);
+
+ check_peer_pmtu(&rt->dst, peer);
+- if (peer->redirect_genid != redirect_genid)
+- peer->redirect_learned.a4 = 0;
++
+ if (peer->redirect_learned.a4 &&
+ peer->redirect_learned.a4 != rt->rt_gateway) {
+ rt->rt_gateway = peer->redirect_learned.a4;