author     Natanael Copa <ncopa@alpinelinux.org>    2014-12-09 07:32:41 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>    2014-12-09 08:34:59 +0000
commit     5d78fbacb3d3cac6ba5cfb09b14f226753256e65 (patch)
tree       a03876a5b6e06b3122b103ab4d10ba421b6a0f14 /main
parent     a013612b79754e16c11fe12ba7cc436890714f96 (diff)
download   aports-5d78fbacb3d3cac6ba5cfb09b14f226753256e65.tar.bz2
           aports-5d78fbacb3d3cac6ba5cfb09b14f226753256e65.tar.xz
main/linux-grsec: upgrade to 3.14.26
Diffstat (limited to 'main')
-rw-r--r--  main/linux-grsec/APKBUILD                                                                                                            |  18
-rw-r--r--  main/linux-grsec/grsecurity-3.0-3.14.26-201412071005.patch (renamed from main/linux-grsec/grsecurity-3.0-3.14.25-201411231452.patch) | 847
2 files changed, 609 insertions, 256 deletions
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index 3629919e7f..342eb63514 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -2,12 +2,12 @@
_flavor=grsec
pkgname=linux-${_flavor}
-pkgver=3.14.25
+pkgver=3.14.26
case $pkgver in
*.*.*) _kernver=${pkgver%.*};;
*.*) _kernver=${pkgver};;
esac
-pkgrel=1
+pkgrel=0
pkgdesc="Linux kernel with grsecurity"
url=http://grsecurity.net
depends="mkinitfs linux-firmware"
@@ -17,7 +17,7 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
- grsecurity-3.0-3.14.25-201411231452.patch
+ grsecurity-3.0-3.14.26-201412071005.patch
fix-memory-map-for-PIE-applications.patch
imx6q-no-unclocked-sleep.patch
@@ -166,8 +166,8 @@ dev() {
}
md5sums="b621207b3f6ecbb67db18b13258f8ea8 linux-3.14.tar.xz
-fffe78a513fa84a15c15a243cac35ca3 patch-3.14.25.xz
-81df75eb4303065d37894fb034f9e19a grsecurity-3.0-3.14.25-201411231452.patch
+8ca9b85121711a42bf37812759c6ca4b patch-3.14.26.xz
+6d4569be80fb761e0193a2a22bbb0421 grsecurity-3.0-3.14.26-201412071005.patch
c6a4ae7e8ca6159e1631545515805216 fix-memory-map-for-PIE-applications.patch
1a307fc1d63231bf01d22493a4f14378 imx6q-no-unclocked-sleep.patch
57f564077ae0b6f10767cd39856ae2a1 net-gre-Set-inner-mac-header-in-gro-complete.patch
@@ -175,8 +175,8 @@ c6a4ae7e8ca6159e1631545515805216 fix-memory-map-for-PIE-applications.patch
38b50cd1a7670f886c5e9fe9f1f91496 kernelconfig.x86_64
6709c83fbbd38d40f31d39f0022d4ce9 kernelconfig.armhf"
sha256sums="61558aa490855f42b6340d1a1596be47454909629327c49a5e4e10268065dffa linux-3.14.tar.xz
-c1a13dbaaabc0fe1330c7e0f0f6e10fbf7d384ccf7f1d15061fec4602233b142 patch-3.14.25.xz
-ff89a9d2887f3d5a50e458b5ab3a3a1726b8c69af175714e1be662c01e3d710c grsecurity-3.0-3.14.25-201411231452.patch
+18a5f194acd519c0da5c208172874d8e3a48cfedb9126b381d168ffdf0a357b1 patch-3.14.26.xz
+56391c13e3d9f860681acdb45d2499ba62a92787ea1634ca3840303c0345c9b7 grsecurity-3.0-3.14.26-201412071005.patch
500f3577310be52e87b9fecdc2e9c4ca43210fd97d69089f9005d484563f74c7 fix-memory-map-for-PIE-applications.patch
21179fbb22a5b74af0a609350ae1a170e232908572b201d02e791d2ce0a685d3 imx6q-no-unclocked-sleep.patch
11fc1c1af92e7b8c519b39e46441ffddc2470d1ac4b9af4195c2472600c274d4 net-gre-Set-inner-mac-header-in-gro-complete.patch
@@ -184,8 +184,8 @@ bf953a65ba047b5316509da5bc7a6dbcee12767e343d26e8360369d27bfdbe78 kernelconfig.x
d555a01f2b464e20cfa71c67ea6d571f80c707c5a3fea33879de09b085e2d7b6 kernelconfig.x86_64
01a6c90cf0643f8727d120aede2267ca7303c4ebe548c5d19222d4387ceb98cc kernelconfig.armhf"
sha512sums="5730d83a7a81134c1e77c0bf89e42dee4f8251ad56c1ac2be20c59e26fdfaa7bea55f277e7af156b637f22e1584914a46089af85039177cb43485089c74ac26e linux-3.14.tar.xz
-0ba7ac8b4bc56115d2d88258573f334cb6d1dd7d302f24ae12c1ed693fc3a568801ffa75719ac7622dedf6673e6db6827bf31066b8afde97bc36d8c897e8cfa8 patch-3.14.25.xz
-baaf39b0d2c07a7b3a9829ec944349a4e687dfa78fd52fbdbfda8fed60755de959f133bb2fcc9c61cdd75c20b42160300b043082616b98612631030569d9ceb6 grsecurity-3.0-3.14.25-201411231452.patch
+dffc53bb779f1fd9a9836c148e14394e6498bcaac7dfc2f712e725dfbc56b39702fffa20ef06d7abe418c8d118876ead7e8fc9c21ca990a61f0f10bcefbba749 patch-3.14.26.xz
+3a46876530ad9b1857297892c09b018a1f7dd635d73b23e11045c4001718f095fe3032b2f022a878da2499705e5a214e4aab7a3f7a24df66a2286a29e7dd8a11 grsecurity-3.0-3.14.26-201412071005.patch
4665c56ae1bbac311f9205d64918e84ee8b01d47d6e2396ff6b8adfb10aada7f7254531ce62e31edbb65c2a54a830f09ad05d314dfcd75d6272f4068945ad7c7 fix-memory-map-for-PIE-applications.patch
87d1ad59732f265a5b0db54490dc1762c14ea4b868e7eb1aedc3ce57b48046de7bbc08cf5cfcf6f1380fa84063b0edb16ba3d5e3c5670be9bbb229275c88b221 imx6q-no-unclocked-sleep.patch
f51377cb9a481aec98dd396712f2e0da39ac75b65ed6d439c023e25d4e799ec3a2f88a62c68b3c9dd6de18ca91b4b262186b9d8183e2fd24e9d7adfa99674871 net-gre-Set-inner-mac-header-in-gro-complete.patch
diff --git a/main/linux-grsec/grsecurity-3.0-3.14.25-201411231452.patch b/main/linux-grsec/grsecurity-3.0-3.14.26-201412071005.patch
index ddf2dae84b..0803058730 100644
--- a/main/linux-grsec/grsecurity-3.0-3.14.25-201411231452.patch
+++ b/main/linux-grsec/grsecurity-3.0-3.14.26-201412071005.patch
@@ -292,7 +292,7 @@ index 7116fda..2f71588 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index eb96e40..b2742ca 100644
+index 63a5ee8..d99d2d9 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -2151,10 +2151,10 @@ index 22a3b9b..7f214ee 100644
/*
* set platform specific SMP operations
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
-index 71a06b2..8bb9ae1 100644
+index 3e635ee..c39f5b4 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
-@@ -88,9 +88,9 @@ struct thread_info {
+@@ -77,9 +77,9 @@ struct thread_info {
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
@@ -2167,7 +2167,7 @@ index 71a06b2..8bb9ae1 100644
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
-@@ -157,7 +157,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+@@ -146,7 +146,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_SYSCALL_AUDIT 9
#define TIF_SYSCALL_TRACEPOINT 10
#define TIF_SECCOMP 11 /* seccomp syscall filtering active */
@@ -2180,7 +2180,7 @@ index 71a06b2..8bb9ae1 100644
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
-@@ -170,10 +174,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+@@ -159,10 +163,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
@@ -3189,7 +3189,7 @@ index 7a3be1d..b00c7de 100644
start, end);
itcm_present = true;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index 9265b8b..381ce44 100644
+index 3f31443..ae30fc0 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
@@ -3220,7 +3220,7 @@ index 9265b8b..381ce44 100644
if (signr)
do_exit(signr);
}
-@@ -884,7 +889,11 @@ void __init early_trap_init(void *vectors_base)
+@@ -857,7 +862,11 @@ void __init early_trap_init(void *vectors_base)
kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
@@ -14988,7 +14988,7 @@ index 20370c6..a2eb9b0 100644
"popl %%ebp\n\t"
"popl %%edi\n\t"
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
-index b17f4f4..9620151 100644
+index b17f4f4..7a16182 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -23,7 +23,18 @@
@@ -15199,10 +15199,13 @@ index b17f4f4..9620151 100644
}
/**
-@@ -153,6 +273,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
+@@ -151,7 +271,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
+ *
+ * Atomically adds @i to @v and returns @i + @v
*/
- static inline int atomic_add_return(int i, atomic_t *v)
- {
+-static inline int atomic_add_return(int i, atomic_t *v)
++static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
++{
+ return i + xadd_check_overflow(&v->counter, i);
+}
+
@@ -15214,11 +15217,17 @@ index b17f4f4..9620151 100644
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
-+{
+ {
return i + xadd(&v->counter, i);
}
-
-@@ -169,9 +301,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+@@ -163,15 +295,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
+ *
+ * Atomically subtracts @i from @v and returns @v - @i
+ */
+-static inline int atomic_sub_return(int i, atomic_t *v)
++static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
+ {
+ return atomic_add_return(-i, v);
}
#define atomic_inc_return(v) (atomic_add_return(1, v))
@@ -16286,19 +16295,19 @@ index 59c6c40..5e0b22c 100644
struct compat_timespec {
compat_time_t tv_sec;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
-index 5f12968..a383517 100644
+index 1717156..14e260a 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
-@@ -203,7 +203,7 @@
- #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
+@@ -204,7 +204,7 @@
#define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
+ #define X86_FEATURE_VMMCALL (8*32+15) /* Prefer vmmcall to vmcall */
-
+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-@@ -211,7 +211,7 @@
+@@ -212,7 +212,7 @@
#define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
@@ -16307,7 +16316,7 @@ index 5f12968..a383517 100644
#define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
-@@ -358,6 +358,7 @@ extern const char * const x86_power_flags[32];
+@@ -359,6 +359,7 @@ extern const char * const x86_power_flags[32];
#undef cpu_has_centaur_mcr
#define cpu_has_centaur_mcr 0
@@ -16315,7 +16324,7 @@ index 5f12968..a383517 100644
#endif /* CONFIG_X86_64 */
#if __GNUC__ >= 4
-@@ -410,7 +411,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+@@ -411,7 +412,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
t_warn:
@@ -16325,7 +16334,7 @@ index 5f12968..a383517 100644
return false;
#endif
-@@ -430,7 +432,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+@@ -431,7 +433,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
@@ -16334,7 +16343,7 @@ index 5f12968..a383517 100644
"3: movb $1,%0\n"
"4:\n"
".previous\n"
-@@ -467,7 +469,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+@@ -468,7 +470,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
" .byte 2b - 1b\n" /* src len */
" .byte 4f - 3f\n" /* repl len */
".previous\n"
@@ -16343,7 +16352,7 @@ index 5f12968..a383517 100644
"3: .byte 0xe9\n .long %l[t_no] - 2b\n"
"4:\n"
".previous\n"
-@@ -500,7 +502,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+@@ -501,7 +503,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
@@ -16352,7 +16361,7 @@ index 5f12968..a383517 100644
"3: movb $0,%0\n"
"4:\n"
".previous\n"
-@@ -514,7 +516,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+@@ -515,7 +517,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
".previous\n"
@@ -17477,7 +17486,7 @@ index 0f1ddee..e2fc3d1 100644
unsigned long y = x - __START_KERNEL_map;
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
-index 8de6d9c..6782051 100644
+index d54d1ee..75450b2 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -1,7 +1,7 @@
@@ -19208,7 +19217,7 @@ index d7f3b3b..3cc39f1 100644
__switch_canary_iparam \
: "memory", "cc" __EXTRA_CLOBBER)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
-index e1940c0..ac50dd8 100644
+index e870ea9..0f4c275 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -10,6 +10,7 @@
@@ -21088,10 +21097,10 @@ index 7fd54f0..0691410 100644
obj-y += proc.o capflags.o powerflags.o common.o
obj-y += rdrand.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
-index c67ffa6..f41fbbf 100644
+index c005fdd..e33da29 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
-@@ -752,7 +752,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+@@ -759,7 +759,7 @@ static void init_amd(struct cpuinfo_x86 *c)
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
/* AMD errata T13 (order #21922) */
@@ -21101,7 +21110,7 @@ index c67ffa6..f41fbbf 100644
if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 3f27f5f..6c575e3 100644
+index e6bddd5..517213d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -88,60 +88,6 @@ static const struct cpu_dev default_cpu = {
@@ -21164,8 +21173,8 @@ index 3f27f5f..6c575e3 100644
-
static int __init x86_xsave_setup(char *s)
{
- setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-@@ -293,6 +239,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+ if (strlen(s))
+@@ -295,6 +241,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
}
}
@@ -21225,7 +21234,7 @@ index 3f27f5f..6c575e3 100644
/*
* Some CPU features depend on higher CPUID levels, which may not always
* be available due to CPUID level capping or broken virtualization
-@@ -393,7 +392,7 @@ void switch_to_new_gdt(int cpu)
+@@ -395,7 +394,7 @@ void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
@@ -21234,7 +21243,7 @@ index 3f27f5f..6c575e3 100644
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
/* Reload the per-cpu base */
-@@ -883,6 +882,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+@@ -885,6 +884,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c);
setup_smap(c);
@@ -21245,7 +21254,7 @@ index 3f27f5f..6c575e3 100644
/*
* The vendor-specific functions might have changed features.
* Now we do "generic changes."
-@@ -891,6 +894,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+@@ -893,6 +896,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
/* Filter out anything that depends on CPUID levels we don't have */
filter_cpuid_features(c, true);
@@ -21256,7 +21265,7 @@ index 3f27f5f..6c575e3 100644
/* If the model name is still unset, do table lookup. */
if (!c->x86_model_id[0]) {
const char *p;
-@@ -1078,10 +1085,12 @@ static __init int setup_disablecpuid(char *arg)
+@@ -1080,10 +1087,12 @@ static __init int setup_disablecpuid(char *arg)
}
__setup("clearcpuid=", setup_disablecpuid);
@@ -21272,7 +21281,7 @@ index 3f27f5f..6c575e3 100644
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE) __visible;
-@@ -1095,7 +1104,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
+@@ -1097,7 +1106,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(unsigned long, kernel_stack) =
@@ -21281,7 +21290,7 @@ index 3f27f5f..6c575e3 100644
EXPORT_PER_CPU_SYMBOL(kernel_stack);
DEFINE_PER_CPU(char *, irq_stack_ptr) =
-@@ -1245,7 +1254,7 @@ void cpu_init(void)
+@@ -1247,7 +1256,7 @@ void cpu_init(void)
load_ucode_ap();
cpu = stack_smp_processor_id();
@@ -21290,7 +21299,7 @@ index 3f27f5f..6c575e3 100644
oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
-@@ -1280,7 +1289,6 @@ void cpu_init(void)
+@@ -1282,7 +1291,6 @@ void cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();
@@ -21298,7 +21307,7 @@ index 3f27f5f..6c575e3 100644
enable_x2apic();
/*
-@@ -1332,7 +1340,7 @@ void cpu_init(void)
+@@ -1334,7 +1342,7 @@ void cpu_init(void)
{
int cpu = smp_processor_id();
struct task_struct *curr = current;
@@ -22084,10 +22093,10 @@ index f2a1770..10fa52d 100644
+EXPORT_SYMBOL(pax_check_alloca);
+#endif
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
-index addb207..921706b 100644
+index 66e274a..99080e6 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
-@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+@@ -118,9 +118,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *irq_stack_end =
(unsigned long *)per_cpu(irq_stack_ptr, cpu);
unsigned used = 0;
@@ -22098,7 +22107,7 @@ index addb207..921706b 100644
if (!task)
task = current;
-@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+@@ -141,10 +141,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
* current stack address. If the stacks consist of nested
* exceptions
*/
@@ -22110,7 +22119,7 @@ index addb207..921706b 100644
estack_end = in_exception_stack(cpu, (unsigned long)stack,
&used, &id);
-@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
if (ops->stack(data, id) < 0)
break;
@@ -22119,7 +22128,7 @@ index addb207..921706b 100644
data, estack_end, &graph);
ops->stack(data, "<EOE>");
/*
-@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+@@ -160,6 +160,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
* second-to-last pointer (index -2 to end) in the
* exception stack:
*/
@@ -22128,7 +22137,7 @@ index addb207..921706b 100644
stack = (unsigned long *) estack_end[-2];
continue;
}
-@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+@@ -171,7 +173,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
if (ops->stack(data, "IRQ") < 0)
break;
@@ -22137,7 +22146,7 @@ index addb207..921706b 100644
ops, data, irq_stack_end, &graph);
/*
* We link to the next stack (which would be
-@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+@@ -190,7 +192,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
/*
* This handles the process stack:
*/
@@ -22148,7 +22157,7 @@ index addb207..921706b 100644
put_cpu();
}
EXPORT_SYMBOL(dump_trace);
-@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
+@@ -299,3 +303,50 @@ int is_valid_bugaddr(unsigned long ip)
return ud2 == 0x0b0f;
}
@@ -23003,7 +23012,7 @@ index c5a9cb9..228d280 100644
/*
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 03cd2a8..d236ccb 100644
+index 02553d6..54e9bd5 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -60,6 +60,8 @@
@@ -23960,32 +23969,16 @@ index 03cd2a8..d236ccb 100644
/*
* The iretq could re-enable interrupts:
*/
-@@ -1145,7 +1641,7 @@ ENTRY(retint_kernel)
+@@ -1132,7 +1628,7 @@ ENTRY(retint_kernel)
jmp exit_intr
#endif
CFI_ENDPROC
-END(common_interrupt)
+ENDPROC(common_interrupt)
- /*
- * If IRET takes a fault on the espfix stack, then we
-@@ -1167,13 +1663,13 @@ __do_double_fault:
- cmpq $native_irq_return_iret,%rax
- jne do_double_fault /* This shouldn't happen... */
- movq PER_CPU_VAR(kernel_stack),%rax
-- subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
-+ subq $(6*8),%rax /* Reset to original stack */
- movq %rax,RSP(%rdi)
- movq $0,(%rax) /* Missing (lost) #GP error code */
- movq $general_protection,RIP(%rdi)
- retq
- CFI_ENDPROC
--END(__do_double_fault)
-+ENDPROC(__do_double_fault)
- #else
- # define __do_double_fault do_double_fault
- #endif
-@@ -1195,7 +1691,7 @@ ENTRY(\sym)
+ /*
+ * End of kprobes section
+@@ -1151,7 +1647,7 @@ ENTRY(\sym)
interrupt \do_sym
jmp ret_from_intr
CFI_ENDPROC
@@ -23994,7 +23987,7 @@ index 03cd2a8..d236ccb 100644
.endm
#ifdef CONFIG_TRACING
-@@ -1283,7 +1779,7 @@ ENTRY(\sym)
+@@ -1239,7 +1735,7 @@ ENTRY(\sym)
call \do_sym
jmp error_exit /* %ebx: no swapgs flag */
CFI_ENDPROC
@@ -24003,7 +23996,7 @@ index 03cd2a8..d236ccb 100644
.endm
.macro paranoidzeroentry sym do_sym
-@@ -1301,10 +1797,10 @@ ENTRY(\sym)
+@@ -1257,10 +1753,10 @@ ENTRY(\sym)
call \do_sym
jmp paranoid_exit /* %ebx: no swapgs flag */
CFI_ENDPROC
@@ -24016,7 +24009,7 @@ index 03cd2a8..d236ccb 100644
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
INTR_FRAME
-@@ -1317,12 +1813,18 @@ ENTRY(\sym)
+@@ -1273,12 +1769,18 @@ ENTRY(\sym)
TRACE_IRQS_OFF_DEBUG
movq %rsp,%rdi /* pt_regs pointer */
xorl %esi,%esi /* no error code */
@@ -24036,7 +24029,7 @@ index 03cd2a8..d236ccb 100644
.endm
.macro errorentry sym do_sym
-@@ -1340,7 +1842,7 @@ ENTRY(\sym)
+@@ -1296,7 +1798,7 @@ ENTRY(\sym)
call \do_sym
jmp error_exit /* %ebx: no swapgs flag */
CFI_ENDPROC
@@ -24045,7 +24038,7 @@ index 03cd2a8..d236ccb 100644
.endm
#ifdef CONFIG_TRACING
-@@ -1371,7 +1873,7 @@ ENTRY(\sym)
+@@ -1327,7 +1829,7 @@ ENTRY(\sym)
call \do_sym
jmp paranoid_exit /* %ebx: no swapgs flag */
CFI_ENDPROC
@@ -24054,7 +24047,7 @@ index 03cd2a8..d236ccb 100644
.endm
zeroentry divide_error do_divide_error
-@@ -1401,9 +1903,10 @@ gs_change:
+@@ -1357,9 +1859,10 @@ gs_change:
2: mfence /* workaround */
SWAPGS
popfq_cfi
@@ -24066,7 +24059,7 @@ index 03cd2a8..d236ccb 100644
_ASM_EXTABLE(gs_change,bad_gs)
.section .fixup,"ax"
-@@ -1431,9 +1934,10 @@ ENTRY(do_softirq_own_stack)
+@@ -1387,9 +1890,10 @@ ENTRY(do_softirq_own_stack)
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count)
@@ -24078,7 +24071,7 @@ index 03cd2a8..d236ccb 100644
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
-@@ -1471,7 +1975,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+@@ -1427,7 +1931,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
decl PER_CPU_VAR(irq_count)
jmp error_exit
CFI_ENDPROC
@@ -24087,7 +24080,7 @@ index 03cd2a8..d236ccb 100644
/*
* Hypervisor uses this for application faults while it executes.
-@@ -1530,7 +2034,7 @@ ENTRY(xen_failsafe_callback)
+@@ -1486,7 +1990,7 @@ ENTRY(xen_failsafe_callback)
SAVE_ALL
jmp error_exit
CFI_ENDPROC
@@ -24096,7 +24089,7 @@ index 03cd2a8..d236ccb 100644
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -1582,18 +2086,33 @@ ENTRY(paranoid_exit)
+@@ -1538,18 +2042,33 @@ ENTRY(paranoid_exit)
DEFAULT_FRAME
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF_DEBUG
@@ -24132,7 +24125,7 @@ index 03cd2a8..d236ccb 100644
jmp irq_return
paranoid_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1622,7 +2141,7 @@ paranoid_schedule:
+@@ -1578,7 +2097,7 @@ paranoid_schedule:
TRACE_IRQS_OFF
jmp paranoid_userspace
CFI_ENDPROC
@@ -24141,7 +24134,7 @@ index 03cd2a8..d236ccb 100644
/*
* Exception entry point. This expects an error code/orig_rax on the stack.
-@@ -1649,12 +2168,23 @@ ENTRY(error_entry)
+@@ -1605,12 +2124,23 @@ ENTRY(error_entry)
movq_cfi r14, R14+8
movq_cfi r15, R15+8
xorl %ebx,%ebx
@@ -24166,16 +24159,16 @@ index 03cd2a8..d236ccb 100644
ret
/*
-@@ -1681,7 +2211,7 @@ bstep_iret:
- movq %rcx,RIP+8(%rsp)
- jmp error_swapgs
+@@ -1644,7 +2174,7 @@ error_bad_iret:
+ decl %ebx /* Return to usergs */
+ jmp error_sti
CFI_ENDPROC
-END(error_entry)
+ENDPROC(error_entry)
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
-@@ -1692,7 +2222,7 @@ ENTRY(error_exit)
+@@ -1655,7 +2185,7 @@ ENTRY(error_exit)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
GET_THREAD_INFO(%rcx)
@@ -24184,7 +24177,7 @@ index 03cd2a8..d236ccb 100644
jne retint_kernel
LOCKDEP_SYS_EXIT_IRQ
movl TI_flags(%rcx),%edx
-@@ -1701,7 +2231,7 @@ ENTRY(error_exit)
+@@ -1664,7 +2194,7 @@ ENTRY(error_exit)
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
@@ -24193,7 +24186,7 @@ index 03cd2a8..d236ccb 100644
/*
* Test if a given stack is an NMI stack or not.
-@@ -1759,9 +2289,11 @@ ENTRY(nmi)
+@@ -1722,9 +2252,11 @@ ENTRY(nmi)
* If %cs was not the kernel segment, then the NMI triggered in user
* space, which means it is definitely not nested.
*/
@@ -24206,7 +24199,7 @@ index 03cd2a8..d236ccb 100644
/*
* Check the special variable on the stack to see if NMIs are
* executing.
-@@ -1795,8 +2327,7 @@ nested_nmi:
+@@ -1758,8 +2290,7 @@ nested_nmi:
1:
/* Set up the interrupted NMIs stack to jump to repeat_nmi */
@@ -24216,7 +24209,7 @@ index 03cd2a8..d236ccb 100644
CFI_ADJUST_CFA_OFFSET 1*8
leaq -10*8(%rsp), %rdx
pushq_cfi $__KERNEL_DS
-@@ -1814,6 +2345,7 @@ nested_nmi_out:
+@@ -1777,6 +2308,7 @@ nested_nmi_out:
CFI_RESTORE rdx
/* No need to check faults here */
@@ -24224,7 +24217,7 @@ index 03cd2a8..d236ccb 100644
INTERRUPT_RETURN
CFI_RESTORE_STATE
-@@ -1910,13 +2442,13 @@ end_repeat_nmi:
+@@ -1873,13 +2405,13 @@ end_repeat_nmi:
subq $ORIG_RAX-R15, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
/*
@@ -24240,7 +24233,7 @@ index 03cd2a8..d236ccb 100644
DEFAULT_FRAME 0
/*
-@@ -1926,9 +2458,9 @@ end_repeat_nmi:
+@@ -1889,9 +2421,9 @@ end_repeat_nmi:
* NMI itself takes a page fault, the page fault that was preempted
* will read the information from the NMI page fault and not the
* origin fault. Save it off and restore it if it changes.
@@ -24252,7 +24245,7 @@ index 03cd2a8..d236ccb 100644
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
-@@ -1937,31 +2469,36 @@ end_repeat_nmi:
+@@ -1900,31 +2432,36 @@ end_repeat_nmi:
/* Did the NMI take a page fault? Restore cr2 if it did */
movq %cr2, %rcx
@@ -27775,7 +27768,7 @@ index 1c113db..287b42e 100644
static int trace_irq_vector_refcount;
static DEFINE_MUTEX(irq_vector_mutex);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
-index 57409f6..b505597 100644
+index f9d976e..3b48355 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -66,7 +66,7 @@
@@ -27854,7 +27847,7 @@ index 57409f6..b505597 100644
regs->ip, regs->sp, error_code);
print_vma_addr(" in ", regs->ip);
pr_cont("\n");
-@@ -251,6 +263,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+@@ -259,6 +271,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_DF;
@@ -27866,7 +27859,7 @@ index 57409f6..b505597 100644
#ifdef CONFIG_DOUBLEFAULT
df_debug(regs, error_code);
#endif
-@@ -273,7 +290,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
+@@ -281,7 +298,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
conditional_sti(regs);
#ifdef CONFIG_X86_32
@@ -27875,7 +27868,7 @@ index 57409f6..b505597 100644
local_irq_enable();
handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
goto exit;
-@@ -281,18 +298,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
+@@ -289,18 +306,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
#endif
tsk = current;
@@ -27920,7 +27913,16 @@ index 57409f6..b505597 100644
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
-@@ -453,7 +494,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+@@ -410,7 +451,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
+ /* Copy the remainder of the stack from the current stack. */
+ memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
+
+- BUG_ON(!user_mode_vm(&new_stack->regs));
++ BUG_ON(!user_mode(&new_stack->regs));
+ return new_stack;
+ }
+ #endif
+@@ -490,7 +531,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
/* It's safe to allow irq's after DR6 has been saved */
preempt_conditional_sti(regs);
@@ -27929,7 +27931,7 @@ index 57409f6..b505597 100644
handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
X86_TRAP_DB);
preempt_conditional_cli(regs);
-@@ -468,7 +509,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+@@ -505,7 +546,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
* We already checked v86 mode above, so we can check for kernel mode
* by just checking the CPL of CS.
*/
@@ -27938,7 +27940,7 @@ index 57409f6..b505597 100644
tsk->thread.debugreg6 &= ~DR_STEP;
set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
regs->flags &= ~X86_EFLAGS_TF;
-@@ -500,7 +541,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+@@ -537,7 +578,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
return;
conditional_sti(regs);
@@ -33020,7 +33022,7 @@ index e395048..cd38278 100644
printk(KERN_INFO "Write protecting the kernel text: %luk\n",
size >> 10);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index f35c66c..84b95ef 100644
+index 2308a40..b17a80d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
@@ -33144,7 +33146,7 @@ index f35c66c..84b95ef 100644
spin_unlock(&init_mm.page_table_lock);
pgd_changed = true;
}
-@@ -1188,8 +1209,8 @@ int kern_addr_valid(unsigned long addr)
+@@ -1197,8 +1218,8 @@ int kern_addr_valid(unsigned long addr)
static struct vm_area_struct gate_vma = {
.vm_start = VSYSCALL_START,
.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
@@ -33155,7 +33157,7 @@ index f35c66c..84b95ef 100644
};
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-@@ -1223,7 +1244,7 @@ int in_gate_area_no_mm(unsigned long addr)
+@@ -1232,7 +1253,7 @@ int in_gate_area_no_mm(unsigned long addr)
const char *arch_vma_name(struct vm_area_struct *vma)
{
@@ -43627,6 +43629,19 @@ index 1946101..09766d2 100644
#include "qib_common.h"
#include "qib_verbs.h"
+diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
+index ce953d8..da10215 100644
+--- a/drivers/input/evdev.c
++++ b/drivers/input/evdev.c
+@@ -422,7 +422,7 @@ static int evdev_open(struct inode *inode, struct file *file)
+
+ err_free_client:
+ evdev_detach_client(evdev, client);
+- kfree(client);
++ kvfree(client);
+ return error;
+ }
+
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 24c41ba..102d71f 100644
--- a/drivers/input/gameport/gameport.c
@@ -43683,7 +43698,7 @@ index 4a95b22..874c182 100644
#include <linux/gameport.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
-index 603fe0d..f63decc 100644
+index 517829f..5e075c3 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -737,7 +737,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
@@ -47418,7 +47433,7 @@ index c05b66d..ed69872 100644
break;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
-index 80bfa03..45114e6 100644
+index 80bfa03..1114364 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -534,7 +534,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
@@ -47430,6 +47445,16 @@ index 80bfa03..45114e6 100644
}
static void populate_erx_stats(struct be_adapter *adapter,
+@@ -4002,6 +4002,9 @@ static int be_ndo_bridge_setlink(struct net_device *dev,
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
++ if (nla_len(attr) < sizeof(mode))
++ return -EINVAL;
++
+ mode = nla_get_u16(attr);
+ if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
+ return -EINVAL;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index c11ecbc..13bb299 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
@@ -47469,6 +47494,20 @@ index e33ec6c..f54cfe7 100644
smp_mb(); /* Force the above update. */
}
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 18076c4..c2cb27f 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -7571,6 +7571,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
++ if (nla_len(attr) < sizeof(mode))
++ return -EINVAL;
++
+ mode = nla_get_u16(attr);
+ if (mode == BRIDGE_MODE_VEPA) {
+ reg = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 5184e2a..acb28c3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -47675,7 +47714,7 @@ index b54fd25..9bd2bae 100644
/* Ignore return since this msg is optional. */
rndis_filter_send_request(dev, request);
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
-index bf0d55e..82bcfbd1 100644
+index 6adbef8..cd6a5f1 100644
--- a/drivers/net/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
@@ -47776,21 +47815,6 @@ index 5a1897d..e860630 100644
break;
err = 0;
break;
-diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
-index 1aff970..cc2ee29 100644
---- a/drivers/net/ppp/pptp.c
-+++ b/drivers/net/ppp/pptp.c
-@@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
- int len = sizeof(struct sockaddr_pppox);
- struct sockaddr_pppox sp;
-
-- sp.sa_family = AF_PPPOX;
-+ memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));
-+
-+ sp.sa_family = AF_PPPOX;
- sp.sa_protocol = PX_PROTO_PPTP;
- sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
-
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 1252d9c..80e660b 100644
--- a/drivers/net/slip/slhc.c
@@ -48771,10 +48795,10 @@ index 5d45a1a..6f5f041 100644
static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
-index 5642ccc..01f03eb 100644
+index 22d49d5..dd5e4d7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
-@@ -250,9 +250,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
+@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
* sequence counter given by mac80211.
*/
if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
@@ -48868,6 +48892,29 @@ index a912dc0..a8225ba 100644
u16 int_num;
ZD_ASSERT(in_interrupt());
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index e30d800..19db057 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -469,9 +469,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+ len = skb_frag_size(frag);
+ offset = frag->page_offset;
+
+- /* Data must not cross a page boundary. */
+- BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
+-
+ /* Skip unused frames from start of page */
+ page += offset >> PAGE_SHIFT;
+ offset &= ~PAGE_MASK;
+@@ -479,8 +476,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+ while (len > 0) {
+ unsigned long bytes;
+
+- BUG_ON(offset >= PAGE_SIZE);
+-
+ bytes = PAGE_SIZE - offset;
+ if (bytes > len)
+ bytes = len;
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 683671a..4519fc2 100644
--- a/drivers/nfc/nfcwilink.c
@@ -49200,7 +49247,7 @@ index 53b58de..4479896 100644
int retval = -ENOMEM;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
-index fb02fc2..83dc2c3 100644
+index ced17f2..185c792 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -524,8 +524,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
@@ -49285,7 +49332,7 @@ index e1e7026..d28dd33 100644
#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
-index 6e34498..9911975 100644
+index 34dff3a..70a5646 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
@@ -51895,7 +51942,7 @@ index 38b4be2..c68af1c 100644
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index 9232c773..e42a77a 100644
+index e6463ef..357ef0a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1154,7 +1154,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
@@ -57561,10 +57608,10 @@ index ce25d75..dc09eeb 100644
&data);
if (!inode) {
diff --git a/fs/aio.c b/fs/aio.c
-index f45ddaa..0160abc 100644
+index 2f7e8c2..6c0f6ec 100644
--- a/fs/aio.c
+++ b/fs/aio.c
-@@ -381,7 +381,7 @@ static int aio_setup_ring(struct kioctx *ctx)
+@@ -389,7 +389,7 @@ static int aio_setup_ring(struct kioctx *ctx)
size += sizeof(struct io_event) * nr_events;
nr_pages = PFN_UP(size);
@@ -61264,6 +61311,72 @@ index 8825154..af51586 100644
}
static int
+diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
+index 6df8d3d..b8b92c2 100644
+--- a/fs/fat/namei_vfat.c
++++ b/fs/fat/namei_vfat.c
+@@ -736,7 +736,12 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
+ }
+
+ alias = d_find_alias(inode);
+- if (alias && !vfat_d_anon_disconn(alias)) {
++ /*
++ * Checking "alias->d_parent == dentry->d_parent" to make sure
++ * FS is not corrupted (especially double linked dir).
++ */
++ if (alias && alias->d_parent == dentry->d_parent &&
++ !vfat_d_anon_disconn(alias)) {
+ /*
+ * This inode has non anonymous-DCACHE_DISCONNECTED
+ * dentry. This means, the user did ->lookup() by an
+@@ -755,12 +760,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
+
+ out:
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
+- dentry->d_time = dentry->d_parent->d_inode->i_version;
+- dentry = d_splice_alias(inode, dentry);
+- if (dentry)
+- dentry->d_time = dentry->d_parent->d_inode->i_version;
+- return dentry;
+-
++ if (!inode)
++ dentry->d_time = dir->i_version;
++ return d_splice_alias(inode, dentry);
+ error:
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
+ return ERR_PTR(err);
+@@ -793,7 +795,6 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ /* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+- dentry->d_time = dentry->d_parent->d_inode->i_version;
+ d_instantiate(dentry, inode);
+ out:
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
+@@ -824,6 +825,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
+ clear_nlink(inode);
+ inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
+ fat_detach(inode);
++ dentry->d_time = dir->i_version;
+ out:
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
+
+@@ -849,6 +851,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
+ clear_nlink(inode);
+ inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
+ fat_detach(inode);
++ dentry->d_time = dir->i_version;
+ out:
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
+
+@@ -889,7 +892,6 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ /* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+- dentry->d_time = dentry->d_parent->d_inode->i_version;
+ d_instantiate(dentry, inode);
+
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index ef68665..5deacdc 100644
--- a/fs/fcntl.c
@@ -63190,7 +63303,7 @@ index acd3947..1f896e2 100644
memcpy(c->data, &cookie, 4);
c->len=4;
diff --git a/fs/locks.c b/fs/locks.c
-index 4dd39b9..12d6aaf 100644
+index 2c61c4e..ee5c867 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2218,16 +2218,16 @@ void locks_remove_flock(struct file *filp)
@@ -64044,10 +64157,10 @@ index 8657335..cd3e37f 100644
[OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
[OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
-index f8f060f..d9a7258 100644
+index 6040da8..e8607ce 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
-@@ -519,14 +519,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+@@ -518,14 +518,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
struct svc_cacherep *rp = rqstp->rq_cacherep;
struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
@@ -64068,7 +64181,7 @@ index f8f060f..d9a7258 100644
/* Don't cache excessive amounts of data and XDR failures */
if (!statp || len > (256 >> 2)) {
-@@ -537,7 +540,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+@@ -536,7 +539,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
switch (cachetype) {
case RC_REPLSTAT:
if (len != 1)
@@ -78573,7 +78686,7 @@ index 77ff547..181834f 100644
#define pud_none(pud) 0
#define pud_bad(pud) 0
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
-index b7babf0..97f4c4f 100644
+index b7babf0..1e4b4f1 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -22,6 +22,12 @@
@@ -78834,7 +78947,15 @@ index b7babf0..97f4c4f 100644
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
-@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
+@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+ return atomic_add_negative(i, v);
+ }
+
+-static inline long atomic_long_add_return(long i, atomic_long_t *l)
++static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+
return (long)atomic_add_return(i, v);
}
@@ -79478,10 +79599,10 @@ index b4a745d..e3c0942 100644
extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
-index be5fd38..d71192a 100644
+index 5d858e0..336c1d9 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
-@@ -102,7 +102,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
+@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
* @word: value to rotate
* @shift: bits to roll
*/
@@ -79490,7 +79611,7 @@ index be5fd38..d71192a 100644
{
return (word << shift) | (word >> (32 - shift));
}
-@@ -112,7 +112,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
+@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
* @word: value to rotate
* @shift: bits to roll
*/
@@ -79499,7 +79620,7 @@ index be5fd38..d71192a 100644
{
return (word >> shift) | (word << (32 - shift));
}
-@@ -168,7 +168,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
+@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
return (__s32)(value << shift) >> shift;
}
@@ -82447,7 +82568,7 @@ index 5bba088..7ad4ae7 100644
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index d5039da..71096b6 100644
+index d5039da..152c9ea 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
@@ -82481,7 +82602,16 @@ index d5039da..71096b6 100644
struct mmu_gather;
struct inode;
-@@ -1120,8 +1126,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+@@ -362,6 +368,8 @@ static inline int is_vmalloc_or_module_addr(const void *x)
+ }
+ #endif
+
++extern void kvfree(const void *addr);
++
+ static inline void compound_lock(struct page *page)
+ {
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+@@ -1120,8 +1128,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
unsigned int flags, unsigned long *prot, resource_size_t *phys);
@@ -82492,7 +82622,7 @@ index d5039da..71096b6 100644
static inline void unmap_shared_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen)
-@@ -1161,9 +1167,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
+@@ -1161,9 +1169,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
@@ -82505,7 +82635,7 @@ index d5039da..71096b6 100644
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
-@@ -1195,34 +1201,6 @@ int set_page_dirty(struct page *page);
+@@ -1195,34 +1203,6 @@ int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
@@ -82540,7 +82670,7 @@ index d5039da..71096b6 100644
extern pid_t
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
-@@ -1322,6 +1300,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
+@@ -1322,6 +1302,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
}
#endif
@@ -82556,7 +82686,7 @@ index d5039da..71096b6 100644
int vma_wants_writenotify(struct vm_area_struct *vma);
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
-@@ -1340,8 +1327,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+@@ -1340,8 +1329,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
{
return 0;
}
@@ -82572,7 +82702,7 @@ index d5039da..71096b6 100644
#endif
#ifdef __PAGETABLE_PMD_FOLDED
-@@ -1350,8 +1344,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+@@ -1350,8 +1346,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
{
return 0;
}
@@ -82588,7 +82718,7 @@ index d5039da..71096b6 100644
#endif
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
-@@ -1369,11 +1370,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
+@@ -1369,11 +1372,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
NULL: pud_offset(pgd, address);
}
@@ -82612,7 +82742,7 @@ index d5039da..71096b6 100644
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
#if USE_SPLIT_PTE_PTLOCKS
-@@ -1763,7 +1776,7 @@ extern int install_special_mapping(struct mm_struct *mm,
+@@ -1763,7 +1778,7 @@ extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags, struct page **pages);
@@ -82621,7 +82751,7 @@ index d5039da..71096b6 100644
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
-@@ -1771,6 +1784,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1771,6 +1786,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);
@@ -82629,7 +82759,7 @@ index d5039da..71096b6 100644
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
-@@ -1799,10 +1813,11 @@ struct vm_unmapped_area_info {
+@@ -1799,10 +1815,11 @@ struct vm_unmapped_area_info {
unsigned long high_limit;
unsigned long align_mask;
unsigned long align_offset;
@@ -82643,7 +82773,7 @@ index d5039da..71096b6 100644
/*
* Search for an unmapped address range.
-@@ -1814,7 +1829,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+@@ -1814,7 +1831,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
* - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
*/
static inline unsigned long
@@ -82652,7 +82782,7 @@ index d5039da..71096b6 100644
{
if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
return unmapped_area(info);
-@@ -1874,6 +1889,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
+@@ -1874,6 +1891,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
@@ -82663,7 +82793,7 @@ index d5039da..71096b6 100644
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
NULL if none. Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
-@@ -1902,15 +1921,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+@@ -1902,15 +1923,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
@@ -82679,7 +82809,7 @@ index d5039da..71096b6 100644
#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
-@@ -1962,6 +1972,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+@@ -1962,6 +1974,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
static inline void vm_stat_account(struct mm_struct *mm,
unsigned long flags, struct file *file, long pages)
{
@@ -82691,7 +82821,7 @@ index d5039da..71096b6 100644
mm->total_vm += pages;
}
#endif /* CONFIG_PROC_FS */
-@@ -2043,7 +2058,7 @@ extern int unpoison_memory(unsigned long pfn);
+@@ -2043,7 +2060,7 @@ extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
@@ -82700,7 +82830,7 @@ index d5039da..71096b6 100644
extern int soft_offline_page(struct page *page, int flags);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-@@ -2078,5 +2093,11 @@ void __init setup_nr_node_ids(void);
+@@ -2078,5 +2095,11 @@ void __init setup_nr_node_ids(void);
static inline void setup_nr_node_ids(void) {}
#endif
@@ -85317,7 +85447,7 @@ index 9a36d92..0aafe2a 100644
void v9fs_register_trans(struct p9_trans_module *m);
void v9fs_unregister_trans(struct p9_trans_module *m);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
-index a175ba4..196eb82 100644
+index a175ba4..196eb8242 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -36,7 +36,7 @@ struct unix_skb_parms {
@@ -88172,10 +88302,10 @@ index 569b2187..19940d9 100644
/* Callchain handling */
extern struct perf_callchain_entry *
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
-index 307d87c..6466cbe 100644
+index 1139b22..5aac2f9 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
-@@ -1666,7 +1666,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
+@@ -1665,7 +1665,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
struct page *page;
uprobe_opcode_t opcode;
@@ -94943,7 +95073,7 @@ index a98c7fc..393f8f1 100644
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
-index 492e36f..b153792 100644
+index 492e36f..732f880 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -403,6 +403,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -94993,6 +95123,39 @@ index 492e36f..b153792 100644
vma->vm_file->f_op->mmap);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+@@ -808,20 +814,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ if (!pte_file(pte)) {
+ swp_entry_t entry = pte_to_swp_entry(pte);
+
+- if (swap_duplicate(entry) < 0)
+- return entry.val;
++ if (likely(!non_swap_entry(entry))) {
++ if (swap_duplicate(entry) < 0)
++ return entry.val;
+
+- /* make sure dst_mm is on swapoff's mmlist. */
+- if (unlikely(list_empty(&dst_mm->mmlist))) {
+- spin_lock(&mmlist_lock);
+- if (list_empty(&dst_mm->mmlist))
+- list_add(&dst_mm->mmlist,
+- &src_mm->mmlist);
+- spin_unlock(&mmlist_lock);
+- }
+- if (likely(!non_swap_entry(entry)))
++ /* make sure dst_mm is on swapoff's mmlist. */
++ if (unlikely(list_empty(&dst_mm->mmlist))) {
++ spin_lock(&mmlist_lock);
++ if (list_empty(&dst_mm->mmlist))
++ list_add(&dst_mm->mmlist,
++ &src_mm->mmlist);
++ spin_unlock(&mmlist_lock);
++ }
+ rss[MM_SWAPENTS]++;
+- else if (is_migration_entry(entry)) {
++ } else if (is_migration_entry(entry)) {
+ page = migration_entry_to_page(entry);
+
+ if (PageAnon(page))
@@ -1137,8 +1143,10 @@ again:
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
@@ -95828,7 +95991,7 @@ index b1eb536..091d154 100644
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff --git a/mm/mmap.c b/mm/mmap.c
-index dfe90657..3892436 100644
+index dfe90657..390920e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -37,6 +37,7 @@
@@ -95914,7 +96077,21 @@ index dfe90657..3892436 100644
if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
(mm->end_data - mm->start_data) > rlim)
goto out;
-@@ -942,6 +970,12 @@ static int
+@@ -745,8 +773,11 @@ again: remove_next = 1 + (end > next->vm_end);
+ * shrinking vma had, to cover any anon pages imported.
+ */
+ if (exporter && exporter->anon_vma && !importer->anon_vma) {
+- if (anon_vma_clone(importer, exporter))
+- return -ENOMEM;
++ int error;
++
++ error = anon_vma_clone(importer, exporter);
++ if (error)
++ return error;
+ importer->anon_vma = exporter->anon_vma;
+ }
+ }
+@@ -942,6 +973,12 @@ static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -95927,7 +96104,7 @@ index dfe90657..3892436 100644
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
if (vma->vm_pgoff == vm_pgoff)
-@@ -961,6 +995,12 @@ static int
+@@ -961,6 +998,12 @@ static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -95940,7 +96117,7 @@ index dfe90657..3892436 100644
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
pgoff_t vm_pglen;
-@@ -1003,13 +1043,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+@@ -1003,13 +1046,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
@@ -95962,7 +96139,7 @@ index dfe90657..3892436 100644
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -1025,6 +1072,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1025,6 +1075,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
if (next && next->vm_end == end) /* cases 6, 7, 8 */
next = next->vm_next;
@@ -95978,7 +96155,7 @@ index dfe90657..3892436 100644
/*
* Can it merge with the predecessor?
*/
-@@ -1044,9 +1100,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1044,9 +1103,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
/* cases 1, 6 */
err = vma_adjust(prev, prev->vm_start,
next->vm_end, prev->vm_pgoff, NULL);
@@ -96004,7 +96181,7 @@ index dfe90657..3892436 100644
if (err)
return NULL;
khugepaged_enter_vma_merge(prev);
-@@ -1060,12 +1131,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1060,12 +1134,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file, pgoff+pglen)) {
@@ -96034,7 +96211,7 @@ index dfe90657..3892436 100644
if (err)
return NULL;
khugepaged_enter_vma_merge(area);
-@@ -1174,8 +1260,10 @@ none:
+@@ -1174,8 +1263,10 @@ none:
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
struct file *file, long pages)
{
@@ -96047,7 +96224,7 @@ index dfe90657..3892436 100644
mm->total_vm += pages;
-@@ -1183,7 +1271,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
+@@ -1183,7 +1274,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
mm->shared_vm += pages;
if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
mm->exec_vm += pages;
@@ -96056,7 +96233,7 @@ index dfe90657..3892436 100644
mm->stack_vm += pages;
}
#endif /* CONFIG_PROC_FS */
-@@ -1213,6 +1301,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
+@@ -1213,6 +1304,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
@@ -96064,7 +96241,7 @@ index dfe90657..3892436 100644
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
-@@ -1239,7 +1328,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1239,7 +1331,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
* (the exception is when the underlying filesystem is noexec
* mounted, in which case we dont add PROT_EXEC.)
*/
@@ -96073,7 +96250,7 @@ index dfe90657..3892436 100644
if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
prot |= PROT_EXEC;
-@@ -1265,7 +1354,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1265,7 +1357,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
@@ -96082,7 +96259,7 @@ index dfe90657..3892436 100644
if (addr & ~PAGE_MASK)
return addr;
-@@ -1276,6 +1365,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1276,6 +1368,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
@@ -96126,7 +96303,7 @@ index dfe90657..3892436 100644
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
-@@ -1363,6 +1489,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1363,6 +1492,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags |= VM_NORESERVE;
}
@@ -96136,7 +96313,7 @@ index dfe90657..3892436 100644
addr = mmap_region(file, addr, len, vm_flags, pgoff);
if (!IS_ERR_VALUE(addr) &&
((vm_flags & VM_LOCKED) ||
-@@ -1456,7 +1585,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
+@@ -1456,7 +1588,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
vm_flags_t vm_flags = vma->vm_flags;
/* If it was private or non-writable, the write bit is already clear */
@@ -96145,7 +96322,7 @@ index dfe90657..3892436 100644
return 0;
/* The backer wishes to know when pages are first written to? */
-@@ -1502,7 +1631,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1502,7 +1634,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
@@ -96168,7 +96345,7 @@ index dfe90657..3892436 100644
if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
-@@ -1521,11 +1665,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1521,11 +1668,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
/* Clear old maps */
error = -ENOMEM;
@@ -96181,7 +96358,7 @@ index dfe90657..3892436 100644
}
/*
-@@ -1556,6 +1699,16 @@ munmap_back:
+@@ -1556,6 +1702,16 @@ munmap_back:
goto unacct_error;
}
@@ -96198,7 +96375,7 @@ index dfe90657..3892436 100644
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
-@@ -1575,6 +1728,13 @@ munmap_back:
+@@ -1575,6 +1731,13 @@ munmap_back:
if (error)
goto unmap_and_free_vma;
@@ -96212,7 +96389,7 @@ index dfe90657..3892436 100644
/* Can addr have changed??
*
* Answer: Yes, several device drivers can do it in their
-@@ -1608,6 +1768,12 @@ munmap_back:
+@@ -1608,6 +1771,12 @@ munmap_back:
}
vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -96225,7 +96402,7 @@ index dfe90657..3892436 100644
/* Once vma denies write, undo our temporary denial count */
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
-@@ -1616,6 +1782,7 @@ out:
+@@ -1616,6 +1785,7 @@ out:
perf_event_mmap(vma);
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -96233,7 +96410,7 @@ index dfe90657..3892436 100644
if (vm_flags & VM_LOCKED) {
if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm)))
-@@ -1648,6 +1815,12 @@ unmap_and_free_vma:
+@@ -1648,6 +1818,12 @@ unmap_and_free_vma:
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
charged = 0;
free_vma:
@@ -96246,7 +96423,7 @@ index dfe90657..3892436 100644
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
-@@ -1655,7 +1828,63 @@ unacct_error:
+@@ -1655,7 +1831,63 @@ unacct_error:
return error;
}
@@ -96311,7 +96488,7 @@ index dfe90657..3892436 100644
{
/*
* We implement the search by looking for an rbtree node that
-@@ -1703,11 +1932,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+@@ -1703,11 +1935,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
}
}
@@ -96342,7 +96519,7 @@ index dfe90657..3892436 100644
if (gap_end >= low_limit && gap_end - gap_start >= length)
goto found;
-@@ -1757,7 +2004,7 @@ found:
+@@ -1757,7 +2007,7 @@ found:
return gap_start;
}
@@ -96351,7 +96528,7 @@ index dfe90657..3892436 100644
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
-@@ -1811,6 +2058,24 @@ check_current:
+@@ -1811,6 +2061,24 @@ check_current:
gap_end = vma->vm_start;
if (gap_end < low_limit)
return -ENOMEM;
@@ -96376,7 +96553,7 @@ index dfe90657..3892436 100644
if (gap_start <= high_limit && gap_end - gap_start >= length)
goto found;
-@@ -1874,6 +2139,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1874,6 +2142,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
@@ -96384,7 +96561,7 @@ index dfe90657..3892436 100644
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
-@@ -1881,11 +2147,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1881,11 +2150,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (flags & MAP_FIXED)
return addr;
@@ -96401,7 +96578,7 @@ index dfe90657..3892436 100644
return addr;
}
-@@ -1894,6 +2164,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1894,6 +2167,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = 0;
@@ -96409,7 +96586,7 @@ index dfe90657..3892436 100644
return vm_unmapped_area(&info);
}
#endif
-@@ -1912,6 +2183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1912,6 +2186,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -96417,7 +96594,7 @@ index dfe90657..3892436 100644
/* requested length too big for entire address space */
if (len > TASK_SIZE - mmap_min_addr)
-@@ -1920,12 +2192,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1920,12 +2195,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (flags & MAP_FIXED)
return addr;
@@ -96435,7 +96612,7 @@ index dfe90657..3892436 100644
return addr;
}
-@@ -1934,6 +2210,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1934,6 +2213,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base;
info.align_mask = 0;
@@ -96443,7 +96620,7 @@ index dfe90657..3892436 100644
addr = vm_unmapped_area(&info);
/*
-@@ -1946,6 +2223,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1946,6 +2226,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
@@ -96456,7 +96633,7 @@ index dfe90657..3892436 100644
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
-@@ -2046,6 +2329,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
+@@ -2046,6 +2332,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
return vma;
}
@@ -96485,7 +96662,7 @@ index dfe90657..3892436 100644
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
-@@ -2062,6 +2367,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2062,6 +2370,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
return -ENOMEM;
/* Stack limit test */
@@ -96493,7 +96670,7 @@ index dfe90657..3892436 100644
if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
-@@ -2072,6 +2378,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2072,6 +2381,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
locked = mm->locked_vm + grow;
limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
limit >>= PAGE_SHIFT;
@@ -96501,7 +96678,7 @@ index dfe90657..3892436 100644
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
-@@ -2101,37 +2408,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2101,37 +2411,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
@@ -96559,7 +96736,7 @@ index dfe90657..3892436 100644
unsigned long size, grow;
size = address - vma->vm_start;
-@@ -2166,6 +2484,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -2166,6 +2487,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
}
}
}
@@ -96568,7 +96745,7 @@ index dfe90657..3892436 100644
vma_unlock_anon_vma(vma);
khugepaged_enter_vma_merge(vma);
validate_mm(vma->vm_mm);
-@@ -2180,6 +2500,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2180,6 +2503,8 @@ int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
int error;
@@ -96577,7 +96754,7 @@ index dfe90657..3892436 100644
/*
* We must make sure the anon_vma is allocated
-@@ -2193,6 +2515,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2193,6 +2518,15 @@ int expand_downwards(struct vm_area_struct *vma,
if (error)
return error;
@@ -96593,7 +96770,7 @@ index dfe90657..3892436 100644
vma_lock_anon_vma(vma);
/*
-@@ -2202,9 +2533,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2202,9 +2536,17 @@ int expand_downwards(struct vm_area_struct *vma,
*/
/* Somebody else might have raced and expanded it already */
@@ -96612,7 +96789,7 @@ index dfe90657..3892436 100644
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
-@@ -2229,13 +2568,27 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2229,13 +2571,27 @@ int expand_downwards(struct vm_area_struct *vma,
vma->vm_pgoff -= grow;
anon_vma_interval_tree_post_update_vma(vma);
vma_gap_update(vma);
@@ -96640,7 +96817,7 @@ index dfe90657..3892436 100644
khugepaged_enter_vma_merge(vma);
validate_mm(vma->vm_mm);
return error;
-@@ -2333,6 +2686,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2333,6 +2689,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
do {
long nrpages = vma_pages(vma);
@@ -96654,7 +96831,7 @@ index dfe90657..3892436 100644
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
-@@ -2377,6 +2737,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2377,6 +2740,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
@@ -96671,7 +96848,7 @@ index dfe90657..3892436 100644
vma_rb_erase(vma, &mm->mm_rb);
mm->map_count--;
tail_vma = vma;
-@@ -2404,14 +2774,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2404,14 +2777,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
struct vm_area_struct *new;
int err = -ENOMEM;
@@ -96705,7 +96882,7 @@ index dfe90657..3892436 100644
/* most fields are the same, copy all, and then fixup */
*new = *vma;
-@@ -2424,6 +2813,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2424,11 +2816,28 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -96728,7 +96905,14 @@ index dfe90657..3892436 100644
err = vma_dup_policy(vma, new);
if (err)
goto out_free_vma;
-@@ -2443,6 +2848,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+
+- if (anon_vma_clone(new, vma))
++ err = anon_vma_clone(new, vma);
++ if (err)
+ goto out_free_mpol;
+
+ if (new->vm_file)
+@@ -2443,6 +2852,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -96767,7 +96951,7 @@ index dfe90657..3892436 100644
/* Success. */
if (!err)
return 0;
-@@ -2452,10 +2889,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2452,10 +2893,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_ops->close(new);
if (new->vm_file)
fput(new->vm_file);
@@ -96787,7 +96971,7 @@ index dfe90657..3892436 100644
kmem_cache_free(vm_area_cachep, new);
out_err:
return err;
-@@ -2468,6 +2913,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2468,6 +2917,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
@@ -96803,7 +96987,7 @@ index dfe90657..3892436 100644
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
-@@ -2479,11 +2933,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2479,11 +2937,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
@@ -96834,7 +97018,7 @@ index dfe90657..3892436 100644
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
-@@ -2558,6 +3031,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2558,6 +3035,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Fix up all other VM information */
remove_vma_list(mm, vma);
@@ -96843,7 +97027,7 @@ index dfe90657..3892436 100644
return 0;
}
-@@ -2566,6 +3041,13 @@ int vm_munmap(unsigned long start, size_t len)
+@@ -2566,6 +3045,13 @@ int vm_munmap(unsigned long start, size_t len)
int ret;
struct mm_struct *mm = current->mm;
@@ -96857,7 +97041,7 @@ index dfe90657..3892436 100644
down_write(&mm->mmap_sem);
ret = do_munmap(mm, start, len);
up_write(&mm->mmap_sem);
-@@ -2579,16 +3061,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2579,16 +3065,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
return vm_munmap(addr, len);
}
@@ -96874,7 +97058,7 @@ index dfe90657..3892436 100644
/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
-@@ -2602,6 +3074,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2602,6 +3078,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
@@ -96882,7 +97066,7 @@ index dfe90657..3892436 100644
len = PAGE_ALIGN(len);
if (!len)
-@@ -2609,10 +3082,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2609,10 +3086,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -96907,7 +97091,7 @@ index dfe90657..3892436 100644
error = mlock_future_check(mm, mm->def_flags, len);
if (error)
return error;
-@@ -2626,21 +3113,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2626,21 +3117,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
/*
* Clear old maps. this also does some error checking for us
*/
@@ -96932,7 +97116,7 @@ index dfe90657..3892436 100644
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
-@@ -2654,7 +3140,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2654,7 +3144,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
@@ -96941,7 +97125,7 @@ index dfe90657..3892436 100644
return -ENOMEM;
}
-@@ -2668,10 +3154,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2668,10 +3158,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
perf_event_mmap(vma);
@@ -96955,7 +97139,7 @@ index dfe90657..3892436 100644
return addr;
}
-@@ -2733,6 +3220,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2733,6 +3224,7 @@ void exit_mmap(struct mm_struct *mm)
while (vma) {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
@@ -96963,7 +97147,7 @@ index dfe90657..3892436 100644
vma = remove_vma(vma);
}
vm_unacct_memory(nr_accounted);
-@@ -2750,6 +3238,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2750,6 +3242,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
struct vm_area_struct *prev;
struct rb_node **rb_link, *rb_parent;
@@ -96977,7 +97161,7 @@ index dfe90657..3892436 100644
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
-@@ -2773,7 +3268,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2773,7 +3272,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
@@ -96999,7 +97183,7 @@ index dfe90657..3892436 100644
return 0;
}
-@@ -2792,6 +3301,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2792,6 +3305,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
struct rb_node **rb_link, *rb_parent;
bool faulted_in_anon_vma = true;
@@ -97008,7 +97192,7 @@ index dfe90657..3892436 100644
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
-@@ -2856,6 +3367,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2856,6 +3371,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return NULL;
}
@@ -97048,7 +97232,7 @@ index dfe90657..3892436 100644
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
-@@ -2867,6 +3411,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2867,6 +3415,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
@@ -97056,7 +97240,7 @@ index dfe90657..3892436 100644
if (cur + npages > lim)
return 0;
return 1;
-@@ -2937,6 +3482,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2937,6 +3486,22 @@ int install_special_mapping(struct mm_struct *mm,
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -97719,7 +97903,7 @@ index fd26d04..0cea1b0 100644
if (!mm || IS_ERR(mm)) {
rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
diff --git a/mm/rmap.c b/mm/rmap.c
-index cdbd312..2e1e0b9 100644
+index cdbd312..cb05259 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
@@ -97799,7 +97983,7 @@ index cdbd312..2e1e0b9 100644
{
struct anon_vma_chain *avc, *pavc;
struct anon_vma *root = NULL;
-@@ -270,7 +304,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+@@ -270,10 +304,11 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
* the corresponding VMA in the parent process is attached to.
* Returns 0 on success, non-zero on failure.
*/
@@ -97808,7 +97992,23 @@ index cdbd312..2e1e0b9 100644
{
struct anon_vma_chain *avc;
struct anon_vma *anon_vma;
-@@ -374,8 +408,10 @@ static void anon_vma_ctor(void *data)
++ int error;
+
+ /* Don't bother if the parent process has no anon_vma here. */
+ if (!pvma->anon_vma)
+@@ -283,8 +318,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+ * First, attach the new VMA to the parent VMA's anon_vmas,
+ * so rmap can find non-COWed pages in child processes.
+ */
+- if (anon_vma_clone(vma, pvma))
+- return -ENOMEM;
++ error = anon_vma_clone(vma, pvma);
++ if (error)
++ return error;
+
+ /* Then add our own anon_vma. */
+ anon_vma = anon_vma_alloc();
+@@ -374,8 +410,10 @@ static void anon_vma_ctor(void *data)
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
@@ -99053,10 +99253,18 @@ index beeeef8..1cb288b 100644
if (S_ISREG(inode->i_mode))
diff --git a/mm/util.c b/mm/util.c
-index c1010cb..91e1a36 100644
+index c1010cb..210c536 100644
--- a/mm/util.c
+++ b/mm/util.c
-@@ -294,6 +294,12 @@ done:
+@@ -9,6 +9,7 @@
+ #include <linux/swapops.h>
+ #include <linux/mman.h>
+ #include <linux/hugetlb.h>
++#include <linux/vmalloc.h>
+
+ #include <asm/uaccess.h>
+
+@@ -294,6 +295,12 @@ done:
void arch_pick_mmap_layout(struct mm_struct *mm)
{
mm->mmap_base = TASK_UNMAPPED_BASE;
@@ -99069,6 +99277,22 @@ index c1010cb..91e1a36 100644
mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
+@@ -383,6 +390,15 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
+ }
+ EXPORT_SYMBOL(vm_mmap);
+
++void kvfree(const void *addr)
++{
++ if (is_vmalloc_addr(addr))
++ vfree(addr);
++ else
++ kfree(addr);
++}
++EXPORT_SYMBOL(kvfree);
++
+ struct address_space *page_mapping(struct page *page)
+ {
+ struct address_space *mapping = page->mapping;
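The mm/util.c hunk above promotes kvfree() to a generic helper (the AppArmor-private copy is dropped further down, in security/apparmor/lib.c). As a rough illustration only — not part of the patch, and with hypothetical function names — a caller would typically pair it with a kmalloc-or-vmalloc fallback allocation, the same shape as AppArmor's __aa_kvmalloc():

    /*
     * Illustrative sketch, assuming kvfree() is declared in <linux/mm.h>
     * after this patch. example_alloc_buf()/example_free_buf() are made-up
     * names; the point is only the allocate-with-fallback / kvfree pairing.
     */
    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    static void *example_alloc_buf(size_t size)
    {
            /* try physically contiguous memory first, without an OOM splat */
            void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

            if (!buf)
                    buf = vmalloc(size);    /* fall back to vmalloc space */
            return buf;
    }

    static void example_free_buf(void *buf)
    {
            kvfree(buf);    /* picks vfree() or kfree() via is_vmalloc_addr() */
    }
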
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0fdf968..991ff6a 100644
--- a/mm/vmalloc.c
@@ -99352,6 +99576,34 @@ index 0fdf968..991ff6a 100644
if (v->nr_pages)
seq_printf(m, " pages=%d", v->nr_pages);
+diff --git a/mm/vmpressure.c b/mm/vmpressure.c
+index d4042e7..c5afd57 100644
+--- a/mm/vmpressure.c
++++ b/mm/vmpressure.c
+@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
+ unsigned long scanned;
+ unsigned long reclaimed;
+
++ spin_lock(&vmpr->sr_lock);
+ /*
+ * Several contexts might be calling vmpressure(), so it is
+ * possible that the work was rescheduled again before the old
+@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
+ * here. No need for any locks here since we don't care if
+ * vmpr->reclaimed is in sync.
+ */
+- if (!vmpr->scanned)
++ scanned = vmpr->scanned;
++ if (!scanned) {
++ spin_unlock(&vmpr->sr_lock);
+ return;
++ }
+
+- spin_lock(&vmpr->sr_lock);
+- scanned = vmpr->scanned;
+ reclaimed = vmpr->reclaimed;
+ vmpr->scanned = 0;
+ vmpr->reclaimed = 0;
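The mm/vmpressure.c hunk above moves the sr_lock acquisition ahead of the vmpr->scanned check, so the "nothing to do" test and the read-and-reset of the counters happen under the same lock the producers appear to use when updating them. A minimal sketch of that snapshot-under-lock pattern, with simplified, hypothetical names (struct counters, sample_work_fn):

    /*
     * Illustrative sketch only: check, snapshot and reset the counters while
     * holding the producers' spinlock, then act on the snapshot afterwards.
     */
    #include <linux/spinlock.h>

    struct counters {
            spinlock_t lock;
            unsigned long scanned;
            unsigned long reclaimed;
    };

    static void sample_work_fn(struct counters *c)
    {
            unsigned long scanned, reclaimed;

            spin_lock(&c->lock);
            scanned = c->scanned;
            if (!scanned) {                 /* nothing accumulated: bail out */
                    spin_unlock(&c->lock);
                    return;
            }
            reclaimed = c->reclaimed;       /* consistent snapshot ...      */
            c->scanned = 0;                 /* ... reset for the next round */
            c->reclaimed = 0;
            spin_unlock(&c->lock);

            /* act on the snapshot outside the lock */
    }
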
diff --git a/mm/vmstat.c b/mm/vmstat.c
index def5dd2..4ce55cec 100644
--- a/mm/vmstat.c
@@ -100679,7 +100931,7 @@ index fdac61c..e5e5b46 100644
pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
return -ENODEV;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index b0db904..70b5ea2 100644
+index b0db904..dc1f9f2 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -58,7 +58,7 @@ struct rtnl_link {
@@ -100717,6 +100969,26 @@ index b0db904..70b5ea2 100644
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
+@@ -2684,6 +2687,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
+ if (br_spec) {
+ nla_for_each_nested(attr, br_spec, rem) {
+ if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
++ if (nla_len(attr) < sizeof(flags))
++ return -EINVAL;
++
+ have_flags = true;
+ flags = nla_get_u16(attr);
+ break;
+@@ -2754,6 +2760,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
+ if (br_spec) {
+ nla_for_each_nested(attr, br_spec, rem) {
+ if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
++ if (nla_len(attr) < sizeof(flags))
++ return -EINVAL;
++
+ have_flags = true;
+ flags = nla_get_u16(attr);
+ break;
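The two rtnetlink hunks above add a length check before nla_get_u16() on IFLA_BRIDGE_FLAGS: nla_get_u16() copies a fixed-size payload out of nla_data() and does not itself verify the attribute's declared length, so an undersized attribute from userspace would otherwise be read past its end. A small sketch of that validate-before-read pattern (the handler name is hypothetical; nla_len()/nla_get_u16() are the real netlink helpers):

    /* Illustrative sketch only: reject a truncated attribute before reading it. */
    #include <net/netlink.h>

    static int example_parse_flags(const struct nlattr *attr, u16 *flags)
    {
            if (nla_len(attr) < sizeof(*flags))
                    return -EINVAL;         /* attribute payload too short */

            *flags = nla_get_u16(attr);     /* now known to be in bounds */
            return 0;
    }
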
diff --git a/net/core/scm.c b/net/core/scm.c
index b442e7e..6f5b5a2 100644
--- a/net/core/scm.c
@@ -101681,7 +101953,7 @@ index 2510c02..cfb34fa 100644
pr_err("Unable to proc dir entry\n");
return -ENOMEM;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
-index e21934b..4e7cb58 100644
+index 0d33f94..fcd69aa 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -59,7 +59,7 @@ struct ping_table {
@@ -101693,7 +101965,7 @@ index e21934b..4e7cb58 100644
EXPORT_SYMBOL_GPL(pingv6_ops);
static u16 ping_port_rover;
-@@ -348,7 +348,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
+@@ -350,7 +350,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
return -ENODEV;
}
}
@@ -101702,7 +101974,7 @@ index e21934b..4e7cb58 100644
scoped);
rcu_read_unlock();
-@@ -556,7 +556,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
+@@ -558,7 +558,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
}
#if IS_ENABLED(CONFIG_IPV6)
} else if (skb->protocol == htons(ETH_P_IPV6)) {
@@ -101711,7 +101983,7 @@ index e21934b..4e7cb58 100644
#endif
}
-@@ -574,7 +574,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
+@@ -576,7 +576,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
info, (u8 *)icmph);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
@@ -101720,7 +101992,7 @@ index e21934b..4e7cb58 100644
info, (u8 *)icmph);
#endif
}
-@@ -858,7 +858,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+@@ -860,7 +860,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
@@ -101729,7 +102001,7 @@ index e21934b..4e7cb58 100644
addr_len);
#endif
}
-@@ -916,10 +916,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+@@ -918,10 +918,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}
if (inet6_sk(sk)->rxopt.all)
@@ -101742,7 +102014,7 @@ index e21934b..4e7cb58 100644
else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
ip_cmsg_recv(msg, skb);
#endif
-@@ -1111,7 +1111,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
+@@ -1113,7 +1113,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
@@ -103119,9 +103391,19 @@ index 20b63d2..31a777d 100644
kfree_skb(skb);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
-index 5f8e128..d32ac8c 100644
+index 5f8e128..9e02f78 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
+@@ -130,8 +130,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ {
+ struct flowi6 *fl6 = &fl->u.ip6;
+ int onlyproto = 0;
+- u16 offset = skb_network_header_len(skb);
+ const struct ipv6hdr *hdr = ipv6_hdr(skb);
++ u16 offset = sizeof(*hdr);
+ struct ipv6_opt_hdr *exthdr;
+ const unsigned char *nh = skb_network_header(skb);
+ u8 nexthdr = nh[IP6CB(skb)->nhoff];
@@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
case IPPROTO_DCCP:
if (!onlyproto && (nh + offset + 4 < skb->data ||
@@ -106509,7 +106791,7 @@ index 51207e4..f7d603d 100644
struct module {
struct module *next;
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
-index deb2994..af4f63e 100644
+index deb2994..af4f63e8 100644
--- a/scripts/mod/sumversion.c
+++ b/scripts/mod/sumversion.c
@@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
@@ -106536,6 +106818,19 @@ index 0865b3e..7235dd4 100644
__ksymtab : { *(SORT(___ksymtab+*)) }
__ksymtab_gpl : { *(SORT(___ksymtab_gpl+*)) }
__ksymtab_unused : { *(SORT(___ksymtab_unused+*)) }
+diff --git a/scripts/package/Makefile b/scripts/package/Makefile
+index c5d4733..7c43eb4 100644
+--- a/scripts/package/Makefile
++++ b/scripts/package/Makefile
+@@ -46,7 +46,7 @@ rpm-pkg rpm: FORCE
+ ln -sf $(srctree) $(KERNELPATH)
+ $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
+ $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion --save-scmversion
+- tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(TAR_CONTENT)
++ tar --owner=root --group=root -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(TAR_CONTENT)
+ rm $(KERNELPATH)
+ rm -f $(objtree)/.scmversion
+ $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 152d4d2..791684c 100644
--- a/scripts/package/builddeb
@@ -106549,10 +106844,18 @@ index 152d4d2..791684c 100644
mkdir -p "$destdir"
(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
-index 1395760..6fb75f2 100755
+index 1395760..6ceef68 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
-@@ -129,6 +129,18 @@ echo ""
+@@ -121,14 +121,27 @@ echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/{build,source}"
+ echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE"
+ echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=firmware --exclude .config.old --exclude .missing-syscalls.d\""
+ echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE;tar xvf -)"
+-echo 'cd $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE"
+-echo "ln -sf /usr/src/kernels/$KERNELRELEASE build"
+-echo "ln -sf /usr/src/kernels/$KERNELRELEASE source"
+
+ echo ""
echo "%clean"
echo 'rm -rf $RPM_BUILD_ROOT'
echo ""
@@ -106568,27 +106871,38 @@ index 1395760..6fb75f2 100755
+echo 'chmod -f 0500 /lib64/modules'
+echo 'fi'
+echo ""
++echo "%post devel"
++echo "ln -sf /usr/src/kernels/$KERNELRELEASE /lib/modules/$KERNELRELEASE/build"
++echo "ln -sf /usr/src/kernels/$KERNELRELEASE /lib/modules/$KERNELRELEASE/source"
++echo ""
echo "%post"
echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm"
-@@ -139,7 +151,7 @@ echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm
+@@ -139,11 +152,11 @@ echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm
echo "fi"
echo ""
echo "%files"
-echo '%defattr (-, root, root)'
+echo '%defattr (400, root, root, 500)'
echo "%dir /lib/modules"
- echo "/lib/modules/$KERNELRELEASE"
+-echo "/lib/modules/$KERNELRELEASE"
echo "%exclude /lib/modules/$KERNELRELEASE/build"
-@@ -152,7 +164,7 @@ echo '%defattr (-, root, root)'
+ echo "%exclude /lib/modules/$KERNELRELEASE/source"
++echo "/lib/modules/$KERNELRELEASE"
+ echo "/lib/firmware/$KERNELRELEASE"
+ echo "/boot/*"
+ echo ""
+@@ -152,8 +165,7 @@ echo '%defattr (-, root, root)'
echo "/usr/include"
echo ""
echo "%files devel"
-echo '%defattr (-, root, root)'
+echo '%defattr (400, root, root, 500)'
++echo "%dir /lib/modules/$KERNELRELEASE"
echo "/usr/src/kernels/$KERNELRELEASE"
- echo "/lib/modules/$KERNELRELEASE/build"
- echo "/lib/modules/$KERNELRELEASE/source"
+-echo "/lib/modules/$KERNELRELEASE/build"
+-echo "/lib/modules/$KERNELRELEASE/source"
+ echo ""
diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
index 68bb4ef..2f419e1 100644
--- a/scripts/pnmtologo.c
@@ -106637,10 +106951,10 @@ index 8fac3fd..32ff38d 100644
unsigned int secindex_strings;
diff --git a/security/Kconfig b/security/Kconfig
-index beb86b5..addbccd 100644
+index beb86b5..4c193cc 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,969 @@
+@@ -4,6 +4,974 @@
menu "Security options"
@@ -106804,6 +107118,11 @@ index beb86b5..addbccd 100644
+ help
+ Choose this option if this kernel is running as a VirtualBox guest or host.
+
++config GRKERNSEC_CONFIG_VIRT_HYPERV
++ bool "Hyper-V"
++ help
++ Choose this option if this kernel is running as a Hyper-V guest.
++
+endchoice
+
+choice
@@ -107610,7 +107929,7 @@ index beb86b5..addbccd 100644
source security/keys/Kconfig
config SECURITY_DMESG_RESTRICT
-@@ -103,7 +1066,7 @@ config INTEL_TXT
+@@ -103,7 +1071,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
@@ -107634,6 +107953,40 @@ index fdaa50c..2761dcb 100644
struct path_cond cond = {
old_dentry->d_inode->i_uid,
old_dentry->d_inode->i_mode
+diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
+index 8fb1488..97130f8 100644
+--- a/security/apparmor/include/apparmor.h
++++ b/security/apparmor/include/apparmor.h
+@@ -66,7 +66,6 @@ extern int apparmor_initialized __initdata;
+ char *aa_split_fqname(char *args, char **ns_name);
+ void aa_info_message(const char *str);
+ void *__aa_kvmalloc(size_t size, gfp_t flags);
+-void kvfree(void *buffer);
+
+ static inline void *kvmalloc(size_t size)
+ {
+diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
+index 6968992..c1827e0 100644
+--- a/security/apparmor/lib.c
++++ b/security/apparmor/lib.c
+@@ -104,17 +104,3 @@ void *__aa_kvmalloc(size_t size, gfp_t flags)
+ }
+ return buffer;
+ }
+-
+-/**
+- * kvfree - free an allocation do by kvmalloc
+- * @buffer: buffer to free (MAYBE_NULL)
+- *
+- * Free a buffer allocated by kvmalloc
+- */
+-void kvfree(void *buffer)
+-{
+- if (is_vmalloc_addr(buffer))
+- vfree(buffer);
+- else
+- kfree(buffer);
+-}
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 4257b7e..2d0732d 100644
--- a/security/apparmor/lsm.c