author     Natanael Copa <ncopa@alpinelinux.org>    2014-12-25 07:22:37 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>    2014-12-31 11:01:49 +0000
commit     05747d20d5ead3cc80480bd80ffc32ac65b10dc8
tree       e7d06cf5dd28c7da96943a9dbbe4b49f3118cc85
parent     4e5bc2017ade39b6896beaf9be2aa06155ad389f
download   aports-05747d20d5ead3cc80480bd80ffc32ac65b10dc8.tar.bz2
           aports-05747d20d5ead3cc80480bd80ffc32ac65b10dc8.tar.xz
main/linux-grsec: upgrade to 3.14.27
(cherry picked from commit a75218abaacb81920db680713bb033dfa3fa3eba)
-rw-r--r--  main/linux-grsec/APKBUILD                                                         |   24
-rw-r--r--  main/linux-grsec/gre-fix-the-inner-mac-header-in-nbma-gre-tunnels-xmit-path.patch |   52
-rw-r--r--  main/linux-grsec/grsecurity-3.0-3.14.27-201412211908.patch (renamed from main/linux-grsec/grsecurity-3.0-3.14.26-201412071005.patch) | 1164
-rw-r--r--  main/linux-grsec/net-gre-Set-inner-mac-header-in-gro-complete.patch               |   14
4 files changed, 974 insertions(+), 280 deletions(-)
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index 342eb63514..0f4e25fbfa 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -2,7 +2,7 @@
_flavor=grsec
pkgname=linux-${_flavor}
-pkgver=3.14.26
+pkgver=3.14.27
case $pkgver in
*.*.*) _kernver=${pkgver%.*};;
*.*) _kernver=${pkgver};;
@@ -17,11 +17,11 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
- grsecurity-3.0-3.14.26-201412071005.patch
+ grsecurity-3.0-3.14.27-201412211908.patch
fix-memory-map-for-PIE-applications.patch
imx6q-no-unclocked-sleep.patch
- net-gre-Set-inner-mac-header-in-gro-complete.patch
+ gre-fix-the-inner-mac-header-in-nbma-gre-tunnels-xmit-path.patch
kernelconfig.x86
kernelconfig.x86_64
@@ -166,29 +166,29 @@ dev() {
}
md5sums="b621207b3f6ecbb67db18b13258f8ea8 linux-3.14.tar.xz
-8ca9b85121711a42bf37812759c6ca4b patch-3.14.26.xz
-6d4569be80fb761e0193a2a22bbb0421 grsecurity-3.0-3.14.26-201412071005.patch
+d79fd9ea62b9c9dd3c17ed7651a9e408 patch-3.14.27.xz
+0ccf786b0da3ba2968c17bcb0e08bdf9 grsecurity-3.0-3.14.27-201412211908.patch
c6a4ae7e8ca6159e1631545515805216 fix-memory-map-for-PIE-applications.patch
1a307fc1d63231bf01d22493a4f14378 imx6q-no-unclocked-sleep.patch
-57f564077ae0b6f10767cd39856ae2a1 net-gre-Set-inner-mac-header-in-gro-complete.patch
+59a78a67677e25540028414bb5eb6330 gre-fix-the-inner-mac-header-in-nbma-gre-tunnels-xmit-path.patch
870b91f0eb07294ba453ac61b052c0b6 kernelconfig.x86
38b50cd1a7670f886c5e9fe9f1f91496 kernelconfig.x86_64
6709c83fbbd38d40f31d39f0022d4ce9 kernelconfig.armhf"
sha256sums="61558aa490855f42b6340d1a1596be47454909629327c49a5e4e10268065dffa linux-3.14.tar.xz
-18a5f194acd519c0da5c208172874d8e3a48cfedb9126b381d168ffdf0a357b1 patch-3.14.26.xz
-56391c13e3d9f860681acdb45d2499ba62a92787ea1634ca3840303c0345c9b7 grsecurity-3.0-3.14.26-201412071005.patch
+5f84a4ff394444486d1715d5283383a8461ff089ed9b9fdc5dde2ed65531d21e patch-3.14.27.xz
+6a54cf72bf2d0231f6c1e13eda0585919178e66312270522d91a9c34c32643f7 grsecurity-3.0-3.14.27-201412211908.patch
500f3577310be52e87b9fecdc2e9c4ca43210fd97d69089f9005d484563f74c7 fix-memory-map-for-PIE-applications.patch
21179fbb22a5b74af0a609350ae1a170e232908572b201d02e791d2ce0a685d3 imx6q-no-unclocked-sleep.patch
-11fc1c1af92e7b8c519b39e46441ffddc2470d1ac4b9af4195c2472600c274d4 net-gre-Set-inner-mac-header-in-gro-complete.patch
+f04d0f6610398f3657ddb2e6926113c43ec331ae256704bca4de11f432881ec5 gre-fix-the-inner-mac-header-in-nbma-gre-tunnels-xmit-path.patch
bf953a65ba047b5316509da5bc7a6dbcee12767e343d26e8360369d27bfdbe78 kernelconfig.x86
d555a01f2b464e20cfa71c67ea6d571f80c707c5a3fea33879de09b085e2d7b6 kernelconfig.x86_64
01a6c90cf0643f8727d120aede2267ca7303c4ebe548c5d19222d4387ceb98cc kernelconfig.armhf"
sha512sums="5730d83a7a81134c1e77c0bf89e42dee4f8251ad56c1ac2be20c59e26fdfaa7bea55f277e7af156b637f22e1584914a46089af85039177cb43485089c74ac26e linux-3.14.tar.xz
-dffc53bb779f1fd9a9836c148e14394e6498bcaac7dfc2f712e725dfbc56b39702fffa20ef06d7abe418c8d118876ead7e8fc9c21ca990a61f0f10bcefbba749 patch-3.14.26.xz
-3a46876530ad9b1857297892c09b018a1f7dd635d73b23e11045c4001718f095fe3032b2f022a878da2499705e5a214e4aab7a3f7a24df66a2286a29e7dd8a11 grsecurity-3.0-3.14.26-201412071005.patch
+1191ef739905b2e5057c5273e5cf026baea1ea4855dca8375dbe4ecaa7e6d2d38b8103e2781554f2d9ecf9026fdad1086c6b9d8f0b41fcb8e39aca0612e208e7 patch-3.14.27.xz
+c136f386b848daefce89e758cd0cb737406e858cbc1b11b241a669a48e44b4ffd6ef0fc731538becdcdf38d4f887ecf4c020983e3e41068caa6f3f83646adf9b grsecurity-3.0-3.14.27-201412211908.patch
4665c56ae1bbac311f9205d64918e84ee8b01d47d6e2396ff6b8adfb10aada7f7254531ce62e31edbb65c2a54a830f09ad05d314dfcd75d6272f4068945ad7c7 fix-memory-map-for-PIE-applications.patch
87d1ad59732f265a5b0db54490dc1762c14ea4b868e7eb1aedc3ce57b48046de7bbc08cf5cfcf6f1380fa84063b0edb16ba3d5e3c5670be9bbb229275c88b221 imx6q-no-unclocked-sleep.patch
-f51377cb9a481aec98dd396712f2e0da39ac75b65ed6d439c023e25d4e799ec3a2f88a62c68b3c9dd6de18ca91b4b262186b9d8183e2fd24e9d7adfa99674871 net-gre-Set-inner-mac-header-in-gro-complete.patch
+ddc32533bd519db5298895eb2da5eb95390999bd3f6d27b5eee38551387df4a43f537235d6a9be859ee1f433420f3afbf01e2c1e7ca0175b27460598c5c385f9 gre-fix-the-inner-mac-header-in-nbma-gre-tunnels-xmit-path.patch
dde402be39f68955f9395f807631f1457e90cda76a80e0e198695c8f946cdba02a00fe12a59a77bf5e8b40f5ecb52efbe364449f3e58d8996f27e07b719ac6a4 kernelconfig.x86
f23749a1cd59c1de769141cef1a358ba3be0985abbfb2fdd065e033c5166f30728192fbf8805b150cf0b1b72a794990da2d9e6e511213cf00d2f0dc47ca61135 kernelconfig.x86_64
64e421a07bd42e83553338bfdbe16a68dbe94fdb3cb1b3658311f79e002345cc9c8edfcc807d4f989a64f8be4b3a48b4a0b7582ac860f5eacb9ff325a3d36fc5 kernelconfig.armhf"
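For context, a bump like the APKBUILD change above is normally produced with Alpine's abuild tool rather than by editing the checksum blocks by hand. A minimal sketch of that workflow, assuming a configured aports checkout (the paths and comments are illustrative, not taken from this commit):

  $ cd aports/main/linux-grsec
  $ # bump pkgver= and swap the patch filenames in source=, then:
  $ abuild checksum      # fetches the sources and rewrites the md5sums/sha256sums/sha512sums blocks
  $ abuild -r            # test-build the package to confirm the new patches still apply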
diff --git a/main/linux-grsec/gre-fix-the-inner-mac-header-in-nbma-gre-tunnels-xmit-path.patch b/main/linux-grsec/gre-fix-the-inner-mac-header-in-nbma-gre-tunnels-xmit-path.patch
new file mode 100644
index 0000000000..92ee9a9689
--- /dev/null
+++ b/main/linux-grsec/gre-fix-the-inner-mac-header-in-nbma-gre-tunnels-xmit-path.patch
@@ -0,0 +1,52 @@
+From a09d1e25a3f333dfb0034f2812750fdb0506ba5d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Timo=20Ter=C3=A4s?= <timo.teras@iki.fi>
+Date: Wed, 10 Dec 2014 08:57:23 +0200
+Subject: [PATCH] gre: fix the inner mac header in nbma gre tunnels xmit path
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The NBMA GRE tunnels temporarily push a GRE header that contains the
+per-packet NBMA destination on the skb via header ops early in the xmit
+path. It is then pulled later, before the real GRE header is constructed.
+
+The inner mac header was thus set differently in the NBMA case. Fix this
+by reordering the pull before calling the offload handler, to make sure
+both tunnel types have the inner mac header set the same way.
+
+Fixes: 14051f0452a2 ("gre: Use inner mac length when computing tunnel length")
+Signed-off-by: Timo Teräs <timo.teras@iki.fi>
+---
+ net/ipv4/ip_gre.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 94213c8..afedb52 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -250,10 +250,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ const struct iphdr *tnl_params;
+
+- skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
+- if (IS_ERR(skb))
+- goto out;
+-
+ if (dev->header_ops) {
+ /* Need space for new headers */
+ if (skb_cow_head(skb, dev->needed_headroom -
+@@ -273,6 +269,10 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+ tnl_params = &tunnel->parms.iph;
+ }
+
++ skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
++ if (IS_ERR(skb))
++ goto out;
++
+ __gre_xmit(skb, dev, tnl_params, skb->protocol);
+
+ return NETDEV_TX_OK;
+--
+2.2.0
+
+
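Before carrying a backported fix like the one above, it is worth a dry run to confirm the patch still applies to the tree the APKBUILD prepares. A quick sketch, again with illustrative paths (abuild unpacks sources under src/ by default):

  $ cd src/linux-3.14
  $ patch -p1 --dry-run < ../../gre-fix-the-inner-mac-header-in-nbma-gre-tunnels-xmit-path.patch   # report only; writes nothing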
diff --git a/main/linux-grsec/grsecurity-3.0-3.14.26-201412071005.patch b/main/linux-grsec/grsecurity-3.0-3.14.27-201412211908.patch
index 0803058730..9c6d79742d 100644
--- a/main/linux-grsec/grsecurity-3.0-3.14.26-201412071005.patch
+++ b/main/linux-grsec/grsecurity-3.0-3.14.27-201412211908.patch
@@ -292,7 +292,7 @@ index 7116fda..2f71588 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 63a5ee8..d99d2d9 100644
+index 944db23..f799f3e 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -897,7 +897,7 @@ index 4733d32..b142a40 100644
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index 62d2cb5..26e43ca 100644
+index 62d2cb5..26a6f3c 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,41 @@
@@ -932,7 +932,7 @@ index 62d2cb5..26e43ca 100644
#define atomic_read(v) (*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
-+ return v->counter;
++ return *(const volatile int *)&v->counter;
+}
#define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
@@ -9624,7 +9624,7 @@ index 6777177..cb5e44f 100644
addr = vm_unmapped_area(&info);
}
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
-index be56a24..443328f 100644
+index be56a24..eaef2ca 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,18 +14,40 @@
@@ -9633,12 +9633,12 @@ index be56a24..443328f 100644
#define atomic_read(v) (*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
-+ return v->counter;
++ return *(const volatile int *)&v->counter;
+}
#define atomic64_read(v) (*(volatile long *)&(v)->counter)
+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
-+ return v->counter;
++ return *(const volatile long *)&v->counter;
+}
#define atomic_set(v, i) (((v)->counter) = i)
@@ -9893,10 +9893,18 @@ index 9b1c36d..209298b 100644
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
unsigned long address)
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
-index 2c8d41f..06b1206 100644
+index 2c8d41f..f337fbc 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
-@@ -38,6 +38,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
+@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
+ }
+
+ #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
++#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
+
+ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
}
#define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
@@ -12342,7 +12350,7 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 98aa930..d2cef74 100644
+index 98aa930..9cfc3c7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,6 +22,7 @@ config X86_64
@@ -12379,7 +12387,15 @@ index 98aa930..d2cef74 100644
---help---
Say Y here to enable options for running Linux under various hyper-
visors. This option enables basic hypervisor detection and platform
-@@ -1129,7 +1131,7 @@ choice
+@@ -973,6 +975,7 @@ config VM86
+
+ config X86_16BIT
+ bool "Enable support for 16-bit segments" if EXPERT
++ depends on !GRKERNSEC
+ default y
+ ---help---
+ This option is required by programs like Wine to run 16-bit
+@@ -1129,7 +1132,7 @@ choice
config NOHIGHMEM
bool "off"
@@ -12388,7 +12404,7 @@ index 98aa930..d2cef74 100644
---help---
Linux can use up to 64 Gigabytes of physical memory on x86 systems.
However, the address space of 32-bit x86 processors is only 4
-@@ -1166,7 +1168,7 @@ config NOHIGHMEM
+@@ -1166,7 +1169,7 @@ config NOHIGHMEM
config HIGHMEM4G
bool "4GB"
@@ -12397,7 +12413,7 @@ index 98aa930..d2cef74 100644
---help---
Select this if you have a 32-bit processor and between 1 and 4
gigabytes of physical RAM.
-@@ -1219,7 +1221,7 @@ config PAGE_OFFSET
+@@ -1219,7 +1222,7 @@ config PAGE_OFFSET
hex
default 0xB0000000 if VMSPLIT_3G_OPT
default 0x80000000 if VMSPLIT_2G
@@ -12406,7 +12422,7 @@ index 98aa930..d2cef74 100644
default 0x40000000 if VMSPLIT_1G
default 0xC0000000
depends on X86_32
-@@ -1624,6 +1626,7 @@ source kernel/Kconfig.hz
+@@ -1624,6 +1627,7 @@ source kernel/Kconfig.hz
config KEXEC
bool "kexec system call"
@@ -12414,7 +12430,7 @@ index 98aa930..d2cef74 100644
---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
-@@ -1775,7 +1778,9 @@ config X86_NEED_RELOCS
+@@ -1775,7 +1779,9 @@ config X86_NEED_RELOCS
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned"
@@ -12425,7 +12441,7 @@ index 98aa930..d2cef74 100644
range 0x2000 0x1000000 if X86_32
range 0x200000 0x1000000 if X86_64
---help---
-@@ -1855,9 +1860,10 @@ config DEBUG_HOTPLUG_CPU0
+@@ -1855,9 +1861,10 @@ config DEBUG_HOTPLUG_CPU0
If unsure, say N.
config COMPAT_VDSO
@@ -12586,7 +12602,7 @@ index 50f8c5e..4f84fff 100644
return diff;
}
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
-index 14fe7cb..829b962 100644
+index b5bb498..74110e8 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
@@ -16951,6 +16967,18 @@ index 9454c16..e4100e3 100644
#define flush_insn_slot(p) do { } while (0)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index e9dc029..468a823 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -55,6 +55,7 @@
+ #define CR3_PCID_ENABLED_RESERVED_BITS 0xFFFFFF0000000000ULL
+ #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
+ 0xFFFFFF0000000000ULL)
++#define CR3_PCID_INVD (1UL << 63)
+ #define CR4_RESERVED_BITS \
+ (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
+ | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 4ad6560..75c7bdd 100644
--- a/arch/x86/include/asm/local.h
@@ -20494,6 +20522,24 @@ index bbae024..e1528f9 100644
#define BIOS_END 0x00100000
#define BIOS_ROM_BASE 0xffe00000
+diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h
+index 46727eb..6e1aaf7 100644
+--- a/arch/x86/include/uapi/asm/ldt.h
++++ b/arch/x86/include/uapi/asm/ldt.h
+@@ -28,6 +28,13 @@ struct user_desc {
+ unsigned int seg_not_present:1;
+ unsigned int useable:1;
+ #ifdef __x86_64__
++ /*
++ * Because this bit is not present in 32-bit user code, user
++ * programs can pass uninitialized values here. Therefore, in
++ * any context in which a user_desc comes from a 32-bit program,
++ * the kernel must act as though lm == 0, regardless of the
++ * actual value.
++ */
+ unsigned int lm:1;
+ #endif
+ };
diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
index 7b0a55a..ad115bf 100644
--- a/arch/x86/include/uapi/asm/ptrace-abi.h
@@ -21654,10 +21700,10 @@ index df5e41f..816c719 100644
extern int generic_get_free_region(unsigned long base, unsigned long size,
int replace_reg);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
-index 79f9f84..38ace52 100644
+index fb345c4..445b2d0 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
-@@ -1351,7 +1351,7 @@ static void __init pmu_check_apic(void)
+@@ -1354,7 +1354,7 @@ static void __init pmu_check_apic(void)
pr_info("no hardware sampling interrupt available.\n");
}
@@ -21666,7 +21712,7 @@ index 79f9f84..38ace52 100644
.name = "format",
.attrs = NULL,
};
-@@ -1450,7 +1450,7 @@ static struct attribute *events_attr[] = {
+@@ -1453,7 +1453,7 @@ static struct attribute *events_attr[] = {
NULL,
};
@@ -21675,7 +21721,7 @@ index 79f9f84..38ace52 100644
.name = "events",
.attrs = events_attr,
};
-@@ -1971,7 +1971,7 @@ static unsigned long get_segment_base(unsigned int segment)
+@@ -1974,7 +1974,7 @@ static unsigned long get_segment_base(unsigned int segment)
if (idx > GDT_ENTRIES)
return 0;
@@ -21684,7 +21730,7 @@ index 79f9f84..38ace52 100644
}
return get_desc_base(desc + idx);
-@@ -2061,7 +2061,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+@@ -2064,7 +2064,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
break;
perf_callchain_store(entry, frame.return_address);
@@ -21707,10 +21753,10 @@ index 639d128..e92d7e5 100644
while (amd_iommu_v2_event_descs[i].attr.attr.name)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
-index 5ee8064..4d32df9 100644
+index d4c0a0e..4057f84 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
-@@ -2318,10 +2318,10 @@ __init int intel_pmu_init(void)
+@@ -2354,10 +2354,10 @@ __init int intel_pmu_init(void)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
if (boot_cpu_has(X86_FEATURE_PDCM)) {
@@ -24288,10 +24334,42 @@ index 02553d6..54e9bd5 100644
/*
* End of kprobes section
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
-index 94d857f..bf1f0bf 100644
+index 94d857f..5bce89c 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
-@@ -197,7 +197,7 @@ void init_espfix_ap(void)
+@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
+ #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
+ static void *espfix_pages[ESPFIX_MAX_PAGES];
+
+-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
+- __aligned(PAGE_SIZE);
++static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
+
+ static unsigned int page_random, slot_random;
+
+@@ -122,14 +121,17 @@ static void init_espfix_random(void)
+ void __init init_espfix_bsp(void)
+ {
+ pgd_t *pgd_p;
+- pteval_t ptemask;
+-
+- ptemask = __supported_pte_mask;
++ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
+
+ /* Install the espfix pud into the kernel page directory */
+- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
++ pgd_p = &init_level4_pgt[index];
+ pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
++ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
++#endif
++
+ /* Randomize the locations */
+ init_espfix_random();
+
+@@ -197,7 +199,7 @@ void init_espfix_ap(void)
set_pte(&pte_p[n*PTE_STRIDE], pte);
/* Job is done for this CPU and any CPU which shares this page */
@@ -25785,6 +25863,38 @@ index c2bedae..25e7ab60 100644
.attr = {
.name = "data",
.mode = S_IRUGO,
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 713f1b3..0b1e1d5 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -280,7 +280,14 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
+ static void __init paravirt_ops_setup(void)
+ {
+ pv_info.name = "KVM";
+- pv_info.paravirt_enabled = 1;
++
++ /*
++ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM
++ * guest kernel works like a bare metal kernel with additional
++ * features, and paravirt_enabled is about features that are
++ * missing.
++ */
++ pv_info.paravirt_enabled = 0;
+
+ if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
+ pv_cpu_ops.io_delay = kvm_io_delay;
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index e604109..c8e98cd 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -263,7 +263,6 @@ void __init kvmclock_init(void)
+ #endif
+ kvm_get_preset_lpj();
+ clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
+- pv_info.paravirt_enabled = 1;
+ pv_info.name = "KVM";
+
+ if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c37886d..d851d32 100644
--- a/arch/x86/kernel/ldt.c
@@ -26289,6 +26399,30 @@ index 1b10af8..45bfbec 100644
};
EXPORT_SYMBOL_GPL(pv_time_ops);
+diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
+index a1da673..2c72d5b 100644
+--- a/arch/x86/kernel/paravirt_patch_64.c
++++ b/arch/x86/kernel/paravirt_patch_64.c
+@@ -9,7 +9,9 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
+ DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
+ DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
+ DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
++#ifndef CONFIG_PAX_MEMORY_UDEREF
+ DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
++#endif
+ DEF_NATIVE(pv_cpu_ops, clts, "clts");
+ DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
+
+@@ -57,7 +59,9 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+ PATCH_SITE(pv_mmu_ops, read_cr3);
+ PATCH_SITE(pv_mmu_ops, write_cr3);
+ PATCH_SITE(pv_cpu_ops, clts);
++#ifndef CONFIG_PAX_MEMORY_UDEREF
+ PATCH_SITE(pv_mmu_ops, flush_tlb_single);
++#endif
+ PATCH_SITE(pv_cpu_ops, wbinvd);
+
+ patch_site:
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 299d493..2ccb0ee 100644
--- a/arch/x86/kernel/pci-calgary_64.c
@@ -27725,10 +27859,58 @@ index 24d3c91..d06b473 100644
return pc;
}
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
-index f7fec09..9991981 100644
+index f7fec09..d0f623f 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
-@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
+@@ -27,6 +27,37 @@ static int get_free_idx(void)
+ return -ESRCH;
+ }
+
++static bool tls_desc_okay(const struct user_desc *info)
++{
++ if (LDT_empty(info))
++ return true;
++
++ /*
++ * espfix is required for 16-bit data segments, but espfix
++ * only works for LDT segments.
++ */
++ if (!info->seg_32bit)
++ return false;
++
++ /* Only allow data segments in the TLS array. */
++ if (info->contents > 1)
++ return false;
++
++ /*
++ * Non-present segments with DPL 3 present an interesting attack
++ * surface. The kernel should handle such segments correctly,
++ * but TLS is very difficult to protect in a sandbox, so prevent
++ * such segments from being created.
++ *
++ * If userspace needs to remove a TLS entry, it can still delete
++ * it outright.
++ */
++ if (info->seg_not_present)
++ return false;
++
++ return true;
++}
++
+ static void set_tls_desc(struct task_struct *p, int idx,
+ const struct user_desc *info, int n)
+ {
+@@ -66,6 +97,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
+ if (copy_from_user(&info, u_info, sizeof(info)))
+ return -EFAULT;
+
++ if (!tls_desc_okay(&info))
++ return -EINVAL;
++
+ if (idx == -1)
+ idx = info.entry_number;
+
+@@ -84,6 +118,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
@@ -27740,7 +27922,15 @@ index f7fec09..9991981 100644
set_tls_desc(p, idx, &info, 1);
return 0;
-@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
+@@ -192,6 +231,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
+ {
+ struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
+ const struct user_desc *info;
++ int i;
+
+ if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
+ (pos % sizeof(struct user_desc)) != 0 ||
+@@ -200,11 +240,15 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
if (kbuf)
info = kbuf;
@@ -27749,6 +27939,14 @@ index f7fec09..9991981 100644
return -EFAULT;
else
info = infobuf;
+
++ for (i = 0; i < count / sizeof(struct user_desc); i++)
++ if (!tls_desc_okay(info + i))
++ return -EINVAL;
++
+ set_tls_desc(target,
+ GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
+ info, count / sizeof(struct user_desc));
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
index 1c113db..287b42e 100644
--- a/arch/x86/kernel/tracepoint.c
@@ -28534,6 +28732,19 @@ index c697625..a032162 100644
return 0;
out:
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 38d3751..e6fcffb 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -3436,7 +3436,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
+
+ ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+ if (efer & EFER_LMA)
+- rsvd = CR3_L_MODE_RESERVED_BITS;
++ rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
+ else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
+ rsvd = CR3_PAE_RESERVED_BITS;
+ else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 453e5fb..214168f 100644
--- a/arch/x86/kvm/lapic.c
@@ -28786,10 +28997,19 @@ index 0c90f4b..9fca4d7 100644
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index fab97ad..394306f 100644
+index fab97ad..bb69607 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -1806,8 +1806,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+@@ -688,6 +688,8 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
+
+ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+ {
++ cr3 &= ~CR3_PCID_INVD;
++
+ if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
+ kvm_mmu_sync_roots(vcpu);
+ kvm_mmu_flush_tlb(vcpu);
+@@ -1806,8 +1808,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
@@ -28800,7 +29020,7 @@ index fab97ad..394306f 100644
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
-@@ -2718,6 +2718,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+@@ -2718,6 +2720,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
if (n < msr_list.nmsrs)
goto out;
r = -EFAULT;
@@ -28809,7 +29029,7 @@ index fab97ad..394306f 100644
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
-@@ -5532,7 +5534,7 @@ static struct notifier_block pvclock_gtod_notifier = {
+@@ -5532,7 +5536,7 @@ static struct notifier_block pvclock_gtod_notifier = {
};
#endif
@@ -41111,10 +41331,10 @@ index 4050450..f67c5c1 100644
iir = I915_READ(IIR);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index b6fb3eb..e0fa1e1 100644
+index c514690..84df88f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -10798,13 +10798,13 @@ struct intel_quirk {
+@@ -10796,13 +10796,13 @@ struct intel_quirk {
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
@@ -41130,7 +41350,7 @@ index b6fb3eb..e0fa1e1 100644
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
-@@ -10812,18 +10812,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -10810,18 +10810,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
return 1;
}
@@ -41928,6 +42148,159 @@ index dbc2def..0a9f710 100644
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index cf4bad2..3d50d64 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -54,7 +54,7 @@
+
+ #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
+ #define SMALL_ALLOCATION 16
+-#define FREE_ALL_PAGES (~0U)
++#define FREE_ALL_PAGES (~0UL)
+ /* times are in msecs */
+ #define PAGE_FREE_INTERVAL 1000
+
+@@ -299,14 +299,13 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+ * @free_all: If set to true will free all pages in pool
+ * @gfp: GFP flags.
+ **/
+-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
++static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
+ gfp_t gfp)
+ {
+ unsigned long irq_flags;
+ struct page *p;
+ struct page **pages_to_free;
+- unsigned freed_pages = 0,
+- npages_to_free = nr_free;
++ unsigned long freed_pages = 0, npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+@@ -366,7 +365,8 @@ restart:
+ __list_del(&p->lru, &pool->list);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+- nr_free -= freed_pages;
++ if (likely(nr_free != FREE_ALL_PAGES))
++ nr_free -= freed_pages;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+@@ -395,7 +395,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ unsigned i;
+ unsigned pool_offset;
+ struct ttm_page_pool *pool;
+- int shrink_pages = sc->nr_to_scan;
++ unsigned long shrink_pages = sc->nr_to_scan;
+ unsigned long freed = 0;
+
+ if (!mutex_trylock(&lock))
+@@ -403,7 +403,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ pool_offset = ++start_pool % NUM_POOLS;
+ /* select start pool in round robin fashion */
+ for (i = 0; i < NUM_POOLS; ++i) {
+- unsigned nr_free = shrink_pages;
++ unsigned long nr_free = shrink_pages;
+ if (shrink_pages == 0)
+ break;
+ pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+@@ -669,7 +669,7 @@ out:
+ }
+
+ /* Put all pages in pages list to correct pool to wait for reuse */
+-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
++static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
+ enum ttm_caching_state cstate)
+ {
+ unsigned long irq_flags;
+@@ -724,7 +724,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+ struct list_head plist;
+ struct page *p = NULL;
+ gfp_t gfp_flags = GFP_USER;
+- unsigned count;
++ unsigned long count;
+ int r;
+
+ /* set zero flag for page allocation if required */
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+index ca65df1..4f0024b 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+@@ -56,7 +56,7 @@
+
+ #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
+ #define SMALL_ALLOCATION 4
+-#define FREE_ALL_PAGES (~0U)
++#define FREE_ALL_PAGES (~0UL)
+ /* times are in msecs */
+ #define IS_UNDEFINED (0)
+ #define IS_WC (1<<1)
+@@ -413,15 +413,14 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+ * @nr_free: If set to true will free all pages in pool
+ * @gfp: GFP flags.
+ **/
+-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
++static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
+ gfp_t gfp)
+ {
+ unsigned long irq_flags;
+ struct dma_page *dma_p, *tmp;
+ struct page **pages_to_free;
+ struct list_head d_pages;
+- unsigned freed_pages = 0,
+- npages_to_free = nr_free;
++ unsigned long freed_pages = 0, npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+@@ -494,7 +493,8 @@ restart:
+ /* remove range of pages from the pool */
+ if (freed_pages) {
+ ttm_pool_update_free_locked(pool, freed_pages);
+- nr_free -= freed_pages;
++ if (likely(nr_free != FREE_ALL_PAGES))
++ nr_free -= freed_pages;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+@@ -928,7 +928,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+ struct dma_page *d_page, *next;
+ enum pool_type type;
+ bool is_cached = false;
+- unsigned count = 0, i, npages = 0;
++ unsigned long count = 0, i, npages = 0;
+ unsigned long irq_flags;
+
+ type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+@@ -1005,7 +1005,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ static unsigned start_pool;
+ unsigned idx = 0;
+ unsigned pool_offset;
+- unsigned shrink_pages = sc->nr_to_scan;
++ unsigned long shrink_pages = sc->nr_to_scan;
+ struct device_pools *p;
+ unsigned long freed = 0;
+
+@@ -1018,7 +1018,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ goto out;
+ pool_offset = ++start_pool % _manager->npools;
+ list_for_each_entry(p, &_manager->pools, pools) {
+- unsigned nr_free;
++ unsigned long nr_free;
+
+ if (!p->dev)
+ continue;
+@@ -1032,7 +1032,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ sc->gfp_mask);
+ freed += nr_free - shrink_pages;
+
+- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
++ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
+ p->pool->dev_name, p->pool->name, current->pid,
+ nr_free, shrink_pages);
+ }
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dbadd49..1b7457b 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
@@ -43811,6 +44184,34 @@ index c9a02fe..0debc75 100644
kref_init(&serio_raw->kref);
INIT_LIST_HEAD(&serio_raw->client_list);
init_waitqueue_head(&serio_raw->wait);
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 9cbef59..76d5cd3 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -878,11 +878,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
+
+ static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
+ {
++ phys_addr_t physaddr;
+ WARN_ON(address & 0x7ULL);
+
+ memset(cmd, 0, sizeof(*cmd));
+- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
+- cmd->data[1] = upper_32_bits(__pa(address));
++
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ if (object_starts_on_stack(address)) {
++ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
++ physaddr = __pa((u64)adjbuf);
++ } else
++#endif
++ physaddr = __pa(address);
++
++ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
++ cmd->data[1] = upper_32_bits(physaddr);
+ cmd->data[2] = 1;
+ CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
+ }
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index e5555fc..937986d 100644
--- a/drivers/iommu/iommu.c
@@ -47251,6 +47652,19 @@ index 70651f8..7eb1bdf 100644
.kind = "bond",
.priv_size = sizeof(struct bonding),
.setup = bond_setup,
+diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
+index 5e40a8b..126bfda 100644
+--- a/drivers/net/caif/caif_hsi.c
++++ b/drivers/net/caif/caif_hsi.c
+@@ -1445,7 +1445,7 @@ err:
+ return -ENODEV;
+ }
+
+-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
++static struct rtnl_link_ops caif_hsi_link_ops = {
+ .kind = "cfhsi",
+ .priv_size = sizeof(struct cfhsi),
+ .setup = cfhsi_setup,
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9e7d95d..d447b88 100644
--- a/drivers/net/can/Kconfig
@@ -47264,6 +47678,45 @@ index 9e7d95d..d447b88 100644
---help---
Say Y here if you want to support for Freescale FlexCAN.
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index cc11f7f..bf7de8b 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -756,7 +756,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
+ return -EOPNOTSUPP;
+ }
+
+-static struct rtnl_link_ops can_link_ops __read_mostly = {
++static struct rtnl_link_ops can_link_ops = {
+ .kind = "can",
+ .maxtype = IFLA_CAN_MAX,
+ .policy = can_policy,
+diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
+index 4e94057..32032ff 100644
+--- a/drivers/net/can/vcan.c
++++ b/drivers/net/can/vcan.c
+@@ -166,7 +166,7 @@ static void vcan_setup(struct net_device *dev)
+ dev->destructor = free_netdev;
+ }
+
+-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
++static struct rtnl_link_ops vcan_link_ops = {
+ .kind = "vcan",
+ .setup = vcan_setup,
+ };
+diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
+index bd8f84b..68ba9f1 100644
+--- a/drivers/net/dummy.c
++++ b/drivers/net/dummy.c
+@@ -155,7 +155,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
+ return 0;
+ }
+
+-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
++static struct rtnl_link_ops dummy_link_ops = {
+ .kind = "dummy",
+ .setup = dummy_setup,
+ .validate = dummy_validate,
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 455d4c3..3353ee7 100644
--- a/drivers/net/ethernet/8390/ax88796.c
@@ -47726,6 +48179,19 @@ index 6adbef8..cd6a5f1 100644
priv = netdev_priv(dev);
priv->phy = phy;
+diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
+index d7b2e94..0812ae9 100644
+--- a/drivers/net/ifb.c
++++ b/drivers/net/ifb.c
+@@ -252,7 +252,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
+ return 0;
+ }
+
+-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
++static struct rtnl_link_ops ifb_link_ops = {
+ .kind = "ifb",
+ .priv_size = sizeof(struct ifb_private),
+ .setup = ifb_setup,
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index fbf7dcd..ad71499 100644
--- a/drivers/net/macvlan.c
@@ -47793,6 +48259,19 @@ index 07c942b..bce8b8a 100644
.notifier_call = macvtap_device_event,
};
+diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
+index d2bb12b..d6c921e 100644
+--- a/drivers/net/nlmon.c
++++ b/drivers/net/nlmon.c
+@@ -162,7 +162,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
+ return 0;
+ }
+
+-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
++static struct rtnl_link_ops nlmon_link_ops = {
+ .kind = "nlmon",
+ .priv_size = sizeof(struct nlmon),
+ .setup = nlmon_setup,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 5a1897d..e860630 100644
--- a/drivers/net/ppp/ppp_generic.c
@@ -47829,9 +48308,18 @@ index 1252d9c..80e660b 100644
/* We've got a compressed packet; read the change byte */
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
-index 979fe43..1f1230c 100644
+index 979fe43..3f92d61 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
+@@ -2086,7 +2086,7 @@ static unsigned int team_get_num_rx_queues(void)
+ return TEAM_DEFAULT_NUM_RX_QUEUES;
+ }
+
+-static struct rtnl_link_ops team_link_ops __read_mostly = {
++static struct rtnl_link_ops team_link_ops = {
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct team),
+ .setup = team_setup,
@@ -2874,7 +2874,7 @@ static int team_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -47842,9 +48330,18 @@ index 979fe43..1f1230c 100644
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
-index ec63314..17810e8 100644
+index ec63314..465e154 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
+@@ -1436,7 +1436,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
+ return -EINVAL;
+ }
+
+-static struct rtnl_link_ops tun_link_ops __read_mostly = {
++static struct rtnl_link_ops tun_link_ops = {
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct tun_struct),
+ .setup = tun_setup,
@@ -1882,7 +1882,7 @@ unlock:
}
@@ -48004,10 +48501,10 @@ index 841b608..198a8b7 100644
#define VIRTNET_DRIVER_VERSION "1.0.0"
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
-index 5441b49..d8030d2 100644
+index 5988910..be561a2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
-@@ -2855,7 +2855,7 @@ nla_put_failure:
+@@ -2851,7 +2851,7 @@ nla_put_failure:
return -EMSGSIZE;
}
@@ -48016,7 +48513,7 @@ index 5441b49..d8030d2 100644
.kind = "vxlan",
.maxtype = IFLA_VXLAN_MAX,
.policy = vxlan_policy,
-@@ -2902,7 +2902,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
+@@ -2898,7 +2898,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -48892,29 +49389,6 @@ index a912dc0..a8225ba 100644
u16 int_num;
ZD_ASSERT(in_interrupt());
-diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
-index e30d800..19db057 100644
---- a/drivers/net/xen-netfront.c
-+++ b/drivers/net/xen-netfront.c
-@@ -469,9 +469,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
- len = skb_frag_size(frag);
- offset = frag->page_offset;
-
-- /* Data must not cross a page boundary. */
-- BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
--
- /* Skip unused frames from start of page */
- page += offset >> PAGE_SHIFT;
- offset &= ~PAGE_MASK;
-@@ -479,8 +476,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
- while (len > 0) {
- unsigned long bytes;
-
-- BUG_ON(offset >= PAGE_SIZE);
--
- bytes = PAGE_SIZE - offset;
- if (bytes > len)
- bytes = len;
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 683671a..4519fc2 100644
--- a/drivers/nfc/nfcwilink.c
@@ -61003,6 +61477,33 @@ index 9f9992b..8b59411 100644
return 0;
}
return 1;
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index 20d6697..f77da76 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -264,10 +264,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
+ #ifdef CONFIG_EXT2_FS_XATTR
+ if (test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+- if (!test_opt(sb, XATTR_USER) &&
+- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
++ if (!test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",nouser_xattr");
+- }
+ #endif
+
+ #ifdef CONFIG_EXT2_FS_POSIX_ACL
+@@ -841,8 +839,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+ if (def_mount_opts & EXT2_DEFM_UID16)
+ set_opt(sbi->s_mount_opt, NO_UID32);
+ #ifdef CONFIG_EXT2_FS_XATTR
+- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
+- set_opt(sbi->s_mount_opt, XATTR_USER);
++ /* always enable user xattrs */
++ set_opt(sbi->s_mount_opt, XATTR_USER);
+ #endif
+ #ifdef CONFIG_EXT2_FS_POSIX_ACL
+ if (def_mount_opts & EXT2_DEFM_ACL)
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 9142614..97484fa 100644
--- a/fs/ext2/xattr.c
@@ -61045,6 +61546,33 @@ index 22548f5..41521d8 100644
return 0;
}
return 1;
+diff --git a/fs/ext3/super.c b/fs/ext3/super.c
+index 0498390..df00300 100644
+--- a/fs/ext3/super.c
++++ b/fs/ext3/super.c
+@@ -649,10 +649,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
+ #ifdef CONFIG_EXT3_FS_XATTR
+ if (test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+- if (!test_opt(sb, XATTR_USER) &&
+- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
++ if (!test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",nouser_xattr");
+- }
+ #endif
+ #ifdef CONFIG_EXT3_FS_POSIX_ACL
+ if (test_opt(sb, POSIX_ACL))
+@@ -1749,8 +1747,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
+ if (def_mount_opts & EXT3_DEFM_UID16)
+ set_opt(sbi->s_mount_opt, NO_UID32);
+ #ifdef CONFIG_EXT3_FS_XATTR
+- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
+- set_opt(sbi->s_mount_opt, XATTR_USER);
++ /* always enable user xattrs */
++ set_opt(sbi->s_mount_opt, XATTR_USER);
+ #endif
+ #ifdef CONFIG_EXT3_FS_POSIX_ACL
+ if (def_mount_opts & EXT3_DEFM_ACL)
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index c6874be..f8a6ae8 100644
--- a/fs/ext3/xattr.c
@@ -63115,6 +63643,37 @@ index e846a32..bb06bd0 100644
put_cpu_var(last_ino);
return res;
}
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index f488bba..bb63254 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -30,6 +30,7 @@ struct rock_state {
+ int cont_size;
+ int cont_extent;
+ int cont_offset;
++ int cont_loops;
+ struct inode *inode;
+ };
+
+@@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode)
+ rs->inode = inode;
+ }
+
++/* Maximum number of Rock Ridge continuation entries */
++#define RR_MAX_CE_ENTRIES 32
++
+ /*
+ * Returns 0 if the caller should continue scanning, 1 if the scan must end
+ * and -ve on error.
+@@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs)
+ goto out;
+ }
+ ret = -EIO;
++ if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
++ goto out;
+ bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
+ if (bh) {
+ memcpy(rs->buffer, bh->b_data + rs->cont_offset,
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 4a6cf28..d3a29d3 100644
--- a/fs/jffs2/erase.c
@@ -63966,10 +64525,19 @@ index 0dd72c8..34dd17d 100644
out:
return len;
diff --git a/fs/namespace.c b/fs/namespace.c
-index d9bf3ef..93207ab 100644
+index d9bf3ef..359b08c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
-@@ -1371,6 +1371,9 @@ static int do_umount(struct mount *mnt, int flags)
+@@ -1295,6 +1295,8 @@ void umount_tree(struct mount *mnt, int how)
+ }
+ if (last) {
+ last->mnt_hash.next = unmounted.first;
++ if (unmounted.first)
++ unmounted.first->pprev = &last->mnt_hash.next;
+ unmounted.first = tmp_list.first;
+ unmounted.first->pprev = &unmounted.first;
+ }
+@@ -1371,6 +1373,9 @@ static int do_umount(struct mount *mnt, int flags)
if (!(sb->s_flags & MS_RDONLY))
retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
up_write(&sb->s_umount);
@@ -63979,7 +64547,7 @@ index d9bf3ef..93207ab 100644
return retval;
}
-@@ -1393,6 +1396,9 @@ static int do_umount(struct mount *mnt, int flags)
+@@ -1393,6 +1398,9 @@ static int do_umount(struct mount *mnt, int flags)
}
unlock_mount_hash();
namespace_unlock();
@@ -63989,7 +64557,7 @@ index d9bf3ef..93207ab 100644
return retval;
}
-@@ -1412,7 +1418,7 @@ static inline bool may_mount(void)
+@@ -1412,7 +1420,7 @@ static inline bool may_mount(void)
* unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
*/
@@ -63998,7 +64566,7 @@ index d9bf3ef..93207ab 100644
{
struct path path;
struct mount *mnt;
-@@ -1454,7 +1460,7 @@ out:
+@@ -1454,7 +1462,7 @@ out:
/*
* The 2.0 compatible umount. No flags.
*/
@@ -64007,7 +64575,7 @@ index d9bf3ef..93207ab 100644
{
return sys_umount(name, 0);
}
-@@ -2503,6 +2509,16 @@ long do_mount(const char *dev_name, const char *dir_name,
+@@ -2503,6 +2511,16 @@ long do_mount(const char *dev_name, const char *dir_name,
MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
MS_STRICTATIME);
@@ -64024,7 +64592,7 @@ index d9bf3ef..93207ab 100644
if (flags & MS_REMOUNT)
retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
data_page);
-@@ -2517,6 +2533,9 @@ long do_mount(const char *dev_name, const char *dir_name,
+@@ -2517,6 +2535,9 @@ long do_mount(const char *dev_name, const char *dir_name,
dev_name, data_page);
dput_out:
path_put(&path);
@@ -64034,7 +64602,7 @@ index d9bf3ef..93207ab 100644
return retval;
}
-@@ -2534,7 +2553,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
+@@ -2534,7 +2555,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
* number incrementing at 10Ghz will take 12,427 years to wrap which
* is effectively never, so we can ignore the possibility.
*/
@@ -64043,7 +64611,7 @@ index d9bf3ef..93207ab 100644
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
-@@ -2549,7 +2568,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+@@ -2549,7 +2570,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
kfree(new_ns);
return ERR_PTR(ret);
}
@@ -64052,7 +64620,7 @@ index d9bf3ef..93207ab 100644
atomic_set(&new_ns->count, 1);
new_ns->root = NULL;
INIT_LIST_HEAD(&new_ns->list);
-@@ -2559,7 +2578,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+@@ -2559,7 +2580,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
return new_ns;
}
@@ -64061,7 +64629,7 @@ index d9bf3ef..93207ab 100644
struct user_namespace *user_ns, struct fs_struct *new_fs)
{
struct mnt_namespace *new_ns;
-@@ -2680,8 +2699,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
+@@ -2680,8 +2701,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
}
EXPORT_SYMBOL(mount_subtree);
@@ -64072,7 +64640,7 @@ index d9bf3ef..93207ab 100644
{
int ret;
char *kernel_type;
-@@ -2794,6 +2813,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+@@ -2794,6 +2815,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
if (error)
goto out2;
@@ -64084,7 +64652,7 @@ index d9bf3ef..93207ab 100644
get_fs_root(current->fs, &root);
old_mp = lock_mount(&old);
error = PTR_ERR(old_mp);
-@@ -3065,7 +3089,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
+@@ -3065,7 +3091,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
@@ -66900,6 +67468,21 @@ index 8d06adf..7e1c9f8 100644
#define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
#define __fs_changed(gen,s) (gen != get_generation (s))
#define fs_changed(gen,s) \
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 2c80335..04d987d 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -1783,6 +1783,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
+ sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
+ sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
+ sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
++#ifdef CONFIG_REISERFS_FS_XATTR
++ /* turn on user xattrs by default */
++ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
++#endif
+ /* no preallocation minimum, be smart in
+ reiserfs_file_write instead */
+ sbi->s_alloc_options.preallocmin = 0;
diff --git a/fs/select.c b/fs/select.c
index 467bb1c..cf9d65a 100644
--- a/fs/select.c
@@ -84001,7 +84584,7 @@ index a964f72..b475afb 100644
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 218b058..1ce7ad0 100644
+index 218b058..7a1fb15 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -133,6 +133,7 @@ struct fs_struct;
@@ -84158,6 +84741,15 @@ index 218b058..1ce7ad0 100644
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
+@@ -1556,7 +1599,7 @@ struct task_struct {
+ * Number of functions that haven't been traced
+ * because of depth overrun.
+ */
+- atomic_t trace_overrun;
++ atomic_unchecked_t trace_overrun;
+ /* Pause for the tracing */
+ atomic_t tracing_graph_pause;
+ #endif
@@ -1588,7 +1631,78 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
@@ -93000,7 +93592,7 @@ index 4f3a3c03..04b7886 100644
ret = -EIO;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index e3be87e..7480b36 100644
+index e3be87e..abc908f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1965,12 +1965,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
@@ -93043,6 +93635,15 @@ index e3be87e..7480b36 100644
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
return 0;
+@@ -4933,7 +4938,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+
+ if (t->ret_stack == NULL) {
+ atomic_set(&t->tracing_graph_pause, 0);
+- atomic_set(&t->trace_overrun, 0);
++ atomic_set_unchecked(&t->trace_overrun, 0);
+ t->curr_ret_stack = -1;
+ /* Make sure the tasks see the -1 first: */
+ smp_wmb();
@@ -5067,6 +5072,10 @@ static void update_function_graph_func(void)
ftrace_graph_entry = ftrace_graph_entry_test;
}
@@ -93062,6 +93663,15 @@ index e3be87e..7480b36 100644
register_pm_notifier(&ftrace_suspend_notifier);
ftrace_graph_active++;
+@@ -5134,7 +5142,7 @@ static void
+ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
+ {
+ atomic_set(&t->tracing_graph_pause, 0);
+- atomic_set(&t->trace_overrun, 0);
++ atomic_set_unchecked(&t->trace_overrun, 0);
+ t->ftrace_timestamp = 0;
+ /* make curr_ret_stack visible before we add the ret_stack */
+ smp_wmb();
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 774a080..7fa60b1 100644
--- a/kernel/trace/ring_buffer.c
@@ -93398,6 +94008,28 @@ index e4c4efc..ef4e975 100644
static void __add_event_to_tracers(struct ftrace_event_call *call);
/* Add an additional event_call dynamically */
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index 0b99120..881174f 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -110,7 +110,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+
+ /* The return trace stack is full */
+ if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+- atomic_inc(&current->trace_overrun);
++ atomic_inc_unchecked(&current->trace_overrun);
+ return -EBUSY;
+ }
+
+@@ -207,7 +207,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
+ *ret = current->ret_stack[index].ret;
+ trace->func = current->ret_stack[index].func;
+ trace->calltime = current->ret_stack[index].calltime;
+- trace->overrun = atomic_read(&current->trace_overrun);
++ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
+ trace->depth = index;
+ }
+
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 0abd9b8..6a663a2 100644
--- a/kernel/trace/trace_mmiotrace.c
@@ -95073,7 +95705,7 @@ index a98c7fc..393f8f1 100644
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
-index 492e36f..732f880 100644
+index 48d7365..732f880 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -403,6 +403,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -95123,39 +95755,6 @@ index 492e36f..732f880 100644
vma->vm_file->f_op->mmap);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
-@@ -808,20 +814,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- if (!pte_file(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
-
-- if (swap_duplicate(entry) < 0)
-- return entry.val;
-+ if (likely(!non_swap_entry(entry))) {
-+ if (swap_duplicate(entry) < 0)
-+ return entry.val;
-
-- /* make sure dst_mm is on swapoff's mmlist. */
-- if (unlikely(list_empty(&dst_mm->mmlist))) {
-- spin_lock(&mmlist_lock);
-- if (list_empty(&dst_mm->mmlist))
-- list_add(&dst_mm->mmlist,
-- &src_mm->mmlist);
-- spin_unlock(&mmlist_lock);
-- }
-- if (likely(!non_swap_entry(entry)))
-+ /* make sure dst_mm is on swapoff's mmlist. */
-+ if (unlikely(list_empty(&dst_mm->mmlist))) {
-+ spin_lock(&mmlist_lock);
-+ if (list_empty(&dst_mm->mmlist))
-+ list_add(&dst_mm->mmlist,
-+ &src_mm->mmlist);
-+ spin_unlock(&mmlist_lock);
-+ }
- rss[MM_SWAPENTS]++;
-- else if (is_migration_entry(entry)) {
-+ } else if (is_migration_entry(entry)) {
- page = migration_entry_to_page(entry);
-
- if (PageAnon(page))
@@ -1137,8 +1143,10 @@ again:
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
@@ -95991,7 +96590,7 @@ index b1eb536..091d154 100644
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff --git a/mm/mmap.c b/mm/mmap.c
-index dfe90657..390920e 100644
+index b91ac80..390920e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -37,6 +37,7 @@
@@ -96077,21 +96676,7 @@ index dfe90657..390920e 100644
if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
(mm->end_data - mm->start_data) > rlim)
goto out;
-@@ -745,8 +773,11 @@ again: remove_next = 1 + (end > next->vm_end);
- * shrinking vma had, to cover any anon pages imported.
- */
- if (exporter && exporter->anon_vma && !importer->anon_vma) {
-- if (anon_vma_clone(importer, exporter))
-- return -ENOMEM;
-+ int error;
-+
-+ error = anon_vma_clone(importer, exporter);
-+ if (error)
-+ return error;
- importer->anon_vma = exporter->anon_vma;
- }
- }
-@@ -942,6 +973,12 @@ static int
+@@ -945,6 +973,12 @@ static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -96104,7 +96689,7 @@ index dfe90657..390920e 100644
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
if (vma->vm_pgoff == vm_pgoff)
-@@ -961,6 +998,12 @@ static int
+@@ -964,6 +998,12 @@ static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -96117,7 +96702,7 @@ index dfe90657..390920e 100644
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
pgoff_t vm_pglen;
-@@ -1003,13 +1046,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+@@ -1006,13 +1046,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
@@ -96139,7 +96724,7 @@ index dfe90657..390920e 100644
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -1025,6 +1075,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1028,6 +1075,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
if (next && next->vm_end == end) /* cases 6, 7, 8 */
next = next->vm_next;
@@ -96155,7 +96740,7 @@ index dfe90657..390920e 100644
/*
* Can it merge with the predecessor?
*/
-@@ -1044,9 +1103,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1047,9 +1103,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
/* cases 1, 6 */
err = vma_adjust(prev, prev->vm_start,
next->vm_end, prev->vm_pgoff, NULL);
@@ -96181,7 +96766,7 @@ index dfe90657..390920e 100644
if (err)
return NULL;
khugepaged_enter_vma_merge(prev);
-@@ -1060,12 +1134,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1063,12 +1134,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file, pgoff+pglen)) {
@@ -96211,7 +96796,7 @@ index dfe90657..390920e 100644
if (err)
return NULL;
khugepaged_enter_vma_merge(area);
-@@ -1174,8 +1263,10 @@ none:
+@@ -1177,8 +1263,10 @@ none:
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
struct file *file, long pages)
{
@@ -96224,7 +96809,7 @@ index dfe90657..390920e 100644
mm->total_vm += pages;
-@@ -1183,7 +1274,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
+@@ -1186,7 +1274,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
mm->shared_vm += pages;
if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
mm->exec_vm += pages;
@@ -96233,7 +96818,7 @@ index dfe90657..390920e 100644
mm->stack_vm += pages;
}
#endif /* CONFIG_PROC_FS */
-@@ -1213,6 +1304,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
+@@ -1216,6 +1304,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
@@ -96241,7 +96826,7 @@ index dfe90657..390920e 100644
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
-@@ -1239,7 +1331,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1242,7 +1331,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
* (the exception is when the underlying filesystem is noexec
* mounted, in which case we dont add PROT_EXEC.)
*/
@@ -96250,7 +96835,7 @@ index dfe90657..390920e 100644
if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
prot |= PROT_EXEC;
-@@ -1265,7 +1357,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1268,7 +1357,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
@@ -96259,7 +96844,7 @@ index dfe90657..390920e 100644
if (addr & ~PAGE_MASK)
return addr;
-@@ -1276,6 +1368,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1279,6 +1368,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
@@ -96303,7 +96888,7 @@ index dfe90657..390920e 100644
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
-@@ -1363,6 +1492,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1366,6 +1492,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags |= VM_NORESERVE;
}
@@ -96313,7 +96898,7 @@ index dfe90657..390920e 100644
addr = mmap_region(file, addr, len, vm_flags, pgoff);
if (!IS_ERR_VALUE(addr) &&
((vm_flags & VM_LOCKED) ||
-@@ -1456,7 +1588,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
+@@ -1459,7 +1588,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
vm_flags_t vm_flags = vma->vm_flags;
/* If it was private or non-writable, the write bit is already clear */
@@ -96322,7 +96907,7 @@ index dfe90657..390920e 100644
return 0;
/* The backer wishes to know when pages are first written to? */
-@@ -1502,7 +1634,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1505,7 +1634,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
@@ -96345,7 +96930,7 @@ index dfe90657..390920e 100644
if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
-@@ -1521,11 +1668,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1524,11 +1668,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
/* Clear old maps */
error = -ENOMEM;
@@ -96358,7 +96943,7 @@ index dfe90657..390920e 100644
}
/*
-@@ -1556,6 +1702,16 @@ munmap_back:
+@@ -1559,6 +1702,16 @@ munmap_back:
goto unacct_error;
}
@@ -96375,7 +96960,7 @@ index dfe90657..390920e 100644
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
-@@ -1575,6 +1731,13 @@ munmap_back:
+@@ -1578,6 +1731,13 @@ munmap_back:
if (error)
goto unmap_and_free_vma;
@@ -96389,7 +96974,7 @@ index dfe90657..390920e 100644
/* Can addr have changed??
*
* Answer: Yes, several device drivers can do it in their
-@@ -1608,6 +1771,12 @@ munmap_back:
+@@ -1611,6 +1771,12 @@ munmap_back:
}
vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -96402,7 +96987,7 @@ index dfe90657..390920e 100644
/* Once vma denies write, undo our temporary denial count */
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
-@@ -1616,6 +1785,7 @@ out:
+@@ -1619,6 +1785,7 @@ out:
perf_event_mmap(vma);
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -96410,7 +96995,7 @@ index dfe90657..390920e 100644
if (vm_flags & VM_LOCKED) {
if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm)))
-@@ -1648,6 +1818,12 @@ unmap_and_free_vma:
+@@ -1651,6 +1818,12 @@ unmap_and_free_vma:
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
charged = 0;
free_vma:
@@ -96423,7 +97008,7 @@ index dfe90657..390920e 100644
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
-@@ -1655,7 +1831,63 @@ unacct_error:
+@@ -1658,7 +1831,63 @@ unacct_error:
return error;
}
@@ -96488,7 +97073,7 @@ index dfe90657..390920e 100644
{
/*
* We implement the search by looking for an rbtree node that
-@@ -1703,11 +1935,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+@@ -1706,11 +1935,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
}
}
@@ -96519,7 +97104,7 @@ index dfe90657..390920e 100644
if (gap_end >= low_limit && gap_end - gap_start >= length)
goto found;
-@@ -1757,7 +2007,7 @@ found:
+@@ -1760,7 +2007,7 @@ found:
return gap_start;
}
@@ -96528,7 +97113,7 @@ index dfe90657..390920e 100644
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
-@@ -1811,6 +2061,24 @@ check_current:
+@@ -1814,6 +2061,24 @@ check_current:
gap_end = vma->vm_start;
if (gap_end < low_limit)
return -ENOMEM;
@@ -96553,7 +97138,7 @@ index dfe90657..390920e 100644
if (gap_start <= high_limit && gap_end - gap_start >= length)
goto found;
-@@ -1874,6 +2142,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1877,6 +2142,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
@@ -96561,7 +97146,7 @@ index dfe90657..390920e 100644
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
-@@ -1881,11 +2150,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1884,11 +2150,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (flags & MAP_FIXED)
return addr;
@@ -96578,7 +97163,7 @@ index dfe90657..390920e 100644
return addr;
}
-@@ -1894,6 +2167,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1897,6 +2167,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = 0;
@@ -96586,7 +97171,7 @@ index dfe90657..390920e 100644
return vm_unmapped_area(&info);
}
#endif
-@@ -1912,6 +2186,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1915,6 +2186,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -96594,7 +97179,7 @@ index dfe90657..390920e 100644
/* requested length too big for entire address space */
if (len > TASK_SIZE - mmap_min_addr)
-@@ -1920,12 +2195,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1923,12 +2195,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (flags & MAP_FIXED)
return addr;
@@ -96612,7 +97197,7 @@ index dfe90657..390920e 100644
return addr;
}
-@@ -1934,6 +2213,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1937,6 +2213,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base;
info.align_mask = 0;
@@ -96620,7 +97205,7 @@ index dfe90657..390920e 100644
addr = vm_unmapped_area(&info);
/*
-@@ -1946,6 +2226,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1949,6 +2226,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
@@ -96633,7 +97218,7 @@ index dfe90657..390920e 100644
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
-@@ -2046,6 +2332,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
+@@ -2049,6 +2332,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
return vma;
}
@@ -96662,7 +97247,7 @@ index dfe90657..390920e 100644
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
-@@ -2062,6 +2370,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2065,6 +2370,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
return -ENOMEM;
/* Stack limit test */
@@ -96670,7 +97255,7 @@ index dfe90657..390920e 100644
if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
-@@ -2072,6 +2381,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2075,6 +2381,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
locked = mm->locked_vm + grow;
limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
limit >>= PAGE_SHIFT;
@@ -96678,7 +97263,7 @@ index dfe90657..390920e 100644
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
-@@ -2101,37 +2411,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2104,37 +2411,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
@@ -96736,7 +97321,7 @@ index dfe90657..390920e 100644
unsigned long size, grow;
size = address - vma->vm_start;
-@@ -2166,6 +2487,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -2169,6 +2487,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
}
}
}
@@ -96745,7 +97330,7 @@ index dfe90657..390920e 100644
vma_unlock_anon_vma(vma);
khugepaged_enter_vma_merge(vma);
validate_mm(vma->vm_mm);
-@@ -2180,6 +2503,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2183,6 +2503,8 @@ int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
int error;
@@ -96754,7 +97339,7 @@ index dfe90657..390920e 100644
/*
* We must make sure the anon_vma is allocated
-@@ -2193,6 +2518,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2196,6 +2518,15 @@ int expand_downwards(struct vm_area_struct *vma,
if (error)
return error;
@@ -96770,7 +97355,7 @@ index dfe90657..390920e 100644
vma_lock_anon_vma(vma);
/*
-@@ -2202,9 +2536,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2205,9 +2536,17 @@ int expand_downwards(struct vm_area_struct *vma,
*/
/* Somebody else might have raced and expanded it already */
@@ -96789,7 +97374,7 @@ index dfe90657..390920e 100644
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
-@@ -2229,13 +2571,27 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2232,13 +2571,27 @@ int expand_downwards(struct vm_area_struct *vma,
vma->vm_pgoff -= grow;
anon_vma_interval_tree_post_update_vma(vma);
vma_gap_update(vma);
@@ -96817,7 +97402,7 @@ index dfe90657..390920e 100644
khugepaged_enter_vma_merge(vma);
validate_mm(vma->vm_mm);
return error;
-@@ -2333,6 +2689,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2336,6 +2689,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
do {
long nrpages = vma_pages(vma);
@@ -96831,7 +97416,7 @@ index dfe90657..390920e 100644
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
-@@ -2377,6 +2740,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2380,6 +2740,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
@@ -96848,7 +97433,7 @@ index dfe90657..390920e 100644
vma_rb_erase(vma, &mm->mm_rb);
mm->map_count--;
tail_vma = vma;
-@@ -2404,14 +2777,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2407,14 +2777,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
struct vm_area_struct *new;
int err = -ENOMEM;
@@ -96882,7 +97467,7 @@ index dfe90657..390920e 100644
/* most fields are the same, copy all, and then fixup */
*new = *vma;
-@@ -2424,11 +2816,28 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2427,6 +2816,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -96905,14 +97490,7 @@ index dfe90657..390920e 100644
err = vma_dup_policy(vma, new);
if (err)
goto out_free_vma;
-
-- if (anon_vma_clone(new, vma))
-+ err = anon_vma_clone(new, vma);
-+ if (err)
- goto out_free_mpol;
-
- if (new->vm_file)
-@@ -2443,6 +2852,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2447,6 +2852,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -96951,7 +97529,7 @@ index dfe90657..390920e 100644
/* Success. */
if (!err)
return 0;
-@@ -2452,10 +2893,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2456,10 +2893,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_ops->close(new);
if (new->vm_file)
fput(new->vm_file);
@@ -96971,7 +97549,7 @@ index dfe90657..390920e 100644
kmem_cache_free(vm_area_cachep, new);
out_err:
return err;
-@@ -2468,6 +2917,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2472,6 +2917,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
@@ -96987,7 +97565,7 @@ index dfe90657..390920e 100644
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
-@@ -2479,11 +2937,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2483,11 +2937,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
@@ -97018,7 +97596,7 @@ index dfe90657..390920e 100644
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
-@@ -2558,6 +3035,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2562,6 +3035,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Fix up all other VM information */
remove_vma_list(mm, vma);
@@ -97027,7 +97605,7 @@ index dfe90657..390920e 100644
return 0;
}
-@@ -2566,6 +3045,13 @@ int vm_munmap(unsigned long start, size_t len)
+@@ -2570,6 +3045,13 @@ int vm_munmap(unsigned long start, size_t len)
int ret;
struct mm_struct *mm = current->mm;
@@ -97041,7 +97619,7 @@ index dfe90657..390920e 100644
down_write(&mm->mmap_sem);
ret = do_munmap(mm, start, len);
up_write(&mm->mmap_sem);
-@@ -2579,16 +3065,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2583,16 +3065,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
return vm_munmap(addr, len);
}
@@ -97058,7 +97636,7 @@ index dfe90657..390920e 100644
/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
-@@ -2602,6 +3078,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2606,6 +3078,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
@@ -97066,7 +97644,7 @@ index dfe90657..390920e 100644
len = PAGE_ALIGN(len);
if (!len)
-@@ -2609,10 +3086,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2613,10 +3086,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -97091,7 +97669,7 @@ index dfe90657..390920e 100644
error = mlock_future_check(mm, mm->def_flags, len);
if (error)
return error;
-@@ -2626,21 +3117,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2630,21 +3117,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
/*
* Clear old maps. this also does some error checking for us
*/
@@ -97116,7 +97694,7 @@ index dfe90657..390920e 100644
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
-@@ -2654,7 +3144,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2658,7 +3144,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
@@ -97125,7 +97703,7 @@ index dfe90657..390920e 100644
return -ENOMEM;
}
-@@ -2668,10 +3158,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2672,10 +3158,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
perf_event_mmap(vma);
@@ -97139,7 +97717,7 @@ index dfe90657..390920e 100644
return addr;
}
-@@ -2733,6 +3224,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2737,6 +3224,7 @@ void exit_mmap(struct mm_struct *mm)
while (vma) {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
@@ -97147,7 +97725,7 @@ index dfe90657..390920e 100644
vma = remove_vma(vma);
}
vm_unacct_memory(nr_accounted);
-@@ -2750,6 +3242,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2754,6 +3242,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
struct vm_area_struct *prev;
struct rb_node **rb_link, *rb_parent;
@@ -97161,7 +97739,7 @@ index dfe90657..390920e 100644
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
-@@ -2773,7 +3272,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2777,7 +3272,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
@@ -97183,7 +97761,7 @@ index dfe90657..390920e 100644
return 0;
}
-@@ -2792,6 +3305,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2796,6 +3305,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
struct rb_node **rb_link, *rb_parent;
bool faulted_in_anon_vma = true;
@@ -97192,7 +97770,7 @@ index dfe90657..390920e 100644
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
-@@ -2856,6 +3371,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2860,6 +3371,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return NULL;
}
@@ -97232,7 +97810,7 @@ index dfe90657..390920e 100644
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
-@@ -2867,6 +3415,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2871,6 +3415,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
@@ -97240,7 +97818,7 @@ index dfe90657..390920e 100644
if (cur + npages > lim)
return 0;
return 1;
-@@ -2937,6 +3486,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2941,6 +3486,22 @@ int install_special_mapping(struct mm_struct *mm,
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -97903,7 +98481,7 @@ index fd26d04..0cea1b0 100644
if (!mm || IS_ERR(mm)) {
rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
diff --git a/mm/rmap.c b/mm/rmap.c
-index cdbd312..cb05259 100644
+index cab9820..cb05259 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
@@ -97983,7 +98561,7 @@ index cdbd312..cb05259 100644
{
struct anon_vma_chain *avc, *pavc;
struct anon_vma *root = NULL;
-@@ -270,10 +304,11 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+@@ -270,7 +304,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
* the corresponding VMA in the parent process is attached to.
* Returns 0 on success, non-zero on failure.
*/
@@ -97992,23 +98570,7 @@ index cdbd312..cb05259 100644
{
struct anon_vma_chain *avc;
struct anon_vma *anon_vma;
-+ int error;
-
- /* Don't bother if the parent process has no anon_vma here. */
- if (!pvma->anon_vma)
-@@ -283,8 +318,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
- * First, attach the new VMA to the parent VMA's anon_vmas,
- * so rmap can find non-COWed pages in child processes.
- */
-- if (anon_vma_clone(vma, pvma))
-- return -ENOMEM;
-+ error = anon_vma_clone(vma, pvma);
-+ if (error)
-+ return error;
-
- /* Then add our own anon_vma. */
- anon_vma = anon_vma_alloc();
-@@ -374,8 +410,10 @@ static void anon_vma_ctor(void *data)
+@@ -376,8 +410,10 @@ static void anon_vma_ctor(void *data)
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
@@ -99576,34 +100138,6 @@ index 0fdf968..991ff6a 100644
if (v->nr_pages)
seq_printf(m, " pages=%d", v->nr_pages);
-diff --git a/mm/vmpressure.c b/mm/vmpressure.c
-index d4042e7..c5afd57 100644
---- a/mm/vmpressure.c
-+++ b/mm/vmpressure.c
-@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
- unsigned long scanned;
- unsigned long reclaimed;
-
-+ spin_lock(&vmpr->sr_lock);
- /*
- * Several contexts might be calling vmpressure(), so it is
- * possible that the work was rescheduled again before the old
-@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
- * here. No need for any locks here since we don't care if
- * vmpr->reclaimed is in sync.
- */
-- if (!vmpr->scanned)
-+ scanned = vmpr->scanned;
-+ if (!scanned) {
-+ spin_unlock(&vmpr->sr_lock);
- return;
-+ }
-
-- spin_lock(&vmpr->sr_lock);
-- scanned = vmpr->scanned;
- reclaimed = vmpr->reclaimed;
- vmpr->scanned = 0;
- vmpr->reclaimed = 0;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index def5dd2..4ce55cec 100644
--- a/mm/vmstat.c
@@ -99731,6 +100265,19 @@ index 44ebd5c..1f732bae 100644
struct vlan_net *vn;
vn = net_generic(net, vlan_net_id);
+diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
+index c7e634a..041cbdb 100644
+--- a/net/8021q/vlan_netlink.c
++++ b/net/8021q/vlan_netlink.c
+@@ -238,7 +238,7 @@ nla_put_failure:
+ return -EMSGSIZE;
+ }
+
+-struct rtnl_link_ops vlan_link_ops __read_mostly = {
++struct rtnl_link_ops vlan_link_ops = {
+ .kind = "vlan",
+ .maxtype = IFLA_VLAN_MAX,
+ .policy = vlan_policy,
diff --git a/net/9p/client.c b/net/9p/client.c
index 9186550..e604a2f 100644
--- a/net/9p/client.c
@@ -100036,7 +100583,7 @@ index c46387a..6ad5ef9 100644
frag_header.no = 0;
frag_header.total_size = htons(skb->len);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
-index f82c267..0e56d32 100644
+index f82c267..8a27a34 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -283,7 +283,7 @@ send:
@@ -100066,6 +100613,15 @@ index f82c267..0e56d32 100644
bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0;
+@@ -929,7 +929,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
+ return 0;
+ }
+
+-struct rtnl_link_ops batadv_link_ops __read_mostly = {
++struct rtnl_link_ops batadv_link_ops = {
+ .kind = "batadv",
+ .priv_size = sizeof(struct batadv_priv),
+ .setup = batadv_softif_init_early,
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 78370ab..1cb3614 100644
--- a/net/batman-adv/types.h
@@ -100097,6 +100653,34 @@ index 78370ab..1cb3614 100644
atomic_t bcast_queue_left;
atomic_t batman_queue_left;
char num_ifaces;
+diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
+index a841d3e..c7a19a1 100644
+--- a/net/bluetooth/bnep/core.c
++++ b/net/bluetooth/bnep/core.c
+@@ -533,6 +533,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
+
+ BT_DBG("");
+
++ if (!l2cap_is_socket(sock))
++ return -EBADFD;
++
+ baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
+ baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
+
+diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
+index 67fe5e8..278a194 100644
+--- a/net/bluetooth/cmtp/core.c
++++ b/net/bluetooth/cmtp/core.c
+@@ -334,6 +334,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
+
+ BT_DBG("");
+
++ if (!l2cap_is_socket(sock))
++ return -EBADFD;
++
+ session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
+ if (!session)
+ return -ENOMEM;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 7552f9e..074ce29 100644
--- a/net/bluetooth/hci_sock.c
@@ -100110,6 +100694,26 @@ index 7552f9e..074ce29 100644
if (copy_from_user(&uf, optval, len)) {
err = -EFAULT;
break;
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index d9fb934..6134618 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -1332,13 +1332,14 @@ int hidp_connection_add(struct hidp_connadd_req *req,
+ {
+ struct hidp_session *session;
+ struct l2cap_conn *conn;
+- struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan;
++ struct l2cap_chan *chan;
+ int ret;
+
+ ret = hidp_verify_sockets(ctrl_sock, intr_sock);
+ if (ret)
+ return ret;
+
++ chan = l2cap_pi(ctrl_sock->sk)->chan;
+ conn = NULL;
+ l2cap_chan_lock(chan);
+ if (chan->conn) {
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 0007c9e..f11541b 100644
--- a/net/bluetooth/l2cap_core.c
@@ -100222,6 +100826,19 @@ index f9c0980a..fcbbfeb 100644
tty_port_close(&dev->port, tty, filp);
}
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index e8844d9..df3afa0 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -482,7 +482,7 @@ static struct rtnl_af_ops br_af_ops = {
+ .get_link_af_size = br_get_link_af_size,
+ };
+
+-struct rtnl_link_ops br_link_ops __read_mostly = {
++struct rtnl_link_ops br_link_ops = {
+ .kind = "bridge",
+ .priv_size = sizeof(struct net_bridge),
+ .setup = br_dev_setup,
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 1059ed3..d70846a 100644
--- a/net/bridge/netfilter/ebtables.c
@@ -100296,6 +100913,19 @@ index 0f45522..dab651f 100644
p->sequence_no);
list_del(&p->list);
goto out;
+diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
+index 4589ff67..46d6b8f 100644
+--- a/net/caif/chnl_net.c
++++ b/net/caif/chnl_net.c
+@@ -516,7 +516,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
+ };
+
+
+-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
++static struct rtnl_link_ops ipcaif_link_ops = {
+ .kind = "caif",
+ .priv_size = sizeof(struct chnl_net),
+ .setup = ipcaif_net_setup,
diff --git a/net/can/af_can.c b/net/can/af_can.c
index a27f8aa..67174a3 100644
--- a/net/can/af_can.c
@@ -100931,7 +101561,7 @@ index fdac61c..e5e5b46 100644
pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
return -ENODEV;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index b0db904..dc1f9f2 100644
+index 4617586..d6ea668 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -58,7 +58,7 @@ struct rtnl_link {
@@ -100969,7 +101599,7 @@ index b0db904..dc1f9f2 100644
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
-@@ -2684,6 +2687,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
+@@ -2685,6 +2688,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
@@ -100979,7 +101609,7 @@ index b0db904..dc1f9f2 100644
have_flags = true;
flags = nla_get_u16(attr);
break;
-@@ -2754,6 +2760,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
+@@ -2755,6 +2761,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
@@ -101399,6 +102029,32 @@ index 5325b54..a0d4d69 100644
return -EFAULT;
*lenp = len;
+diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
+index 01a5261..29cea68 100644
+--- a/net/hsr/hsr_netlink.c
++++ b/net/hsr/hsr_netlink.c
+@@ -86,7 +86,7 @@ nla_put_failure:
+ return -EMSGSIZE;
+ }
+
+-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
++static struct rtnl_link_ops hsr_link_ops = {
+ .kind = "hsr",
+ .maxtype = IFLA_HSR_MAX,
+ .policy = hsr_policy,
+diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
+index 8edfea5..a17998f 100644
+--- a/net/ieee802154/6lowpan.c
++++ b/net/ieee802154/6lowpan.c
+@@ -714,7 +714,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
+ dev_put(real_dev);
+ }
+
+-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
++static struct rtnl_link_ops lowpan_link_ops = {
+ .kind = "lowpan",
+ .priv_size = sizeof(struct lowpan_dev_info),
+ .setup = lowpan_setup,
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 07bd8ed..c574801 100644
--- a/net/ipv4/af_inet.c
@@ -101560,7 +102216,7 @@ index 017fa5e..d61ebac 100644
return nh->nh_saddr;
}
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
-index 8c8493e..d5214a4 100644
+index 278836f..482db7b 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -56,13 +56,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
@@ -101767,7 +102423,7 @@ index 580dd96..9fcef7e 100644
msg.msg_flags = flags;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
-index e4a8f76..dd8ad72 100644
+index b0a9cb4..8e8f8d2 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -44,7 +44,7 @@
@@ -102819,7 +103475,7 @@ index 7b32652..0bc348b 100644
table = kmemdup(ipv6_icmp_table_template,
sizeof(ipv6_icmp_table_template),
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
-index b27f6d3..1a2977b 100644
+index 4a230b1..a1d47b8 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -71,7 +71,7 @@ struct ip6gre_net {
@@ -102893,7 +103549,7 @@ index 657639d..8b609c5 100644
.maxtype = IFLA_IPTUN_MAX,
.policy = ip6_tnl_policy,
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
-index 9a5339f..8fc3c37 100644
+index 28456c9..13a4115 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
@@ -102905,7 +103561,7 @@ index 9a5339f..8fc3c37 100644
static int vti6_net_id __read_mostly;
struct vti6_net {
-@@ -892,7 +892,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
+@@ -901,7 +901,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
[IFLA_VTI_OKEY] = { .type = NLA_U32 },
};
diff --git a/main/linux-grsec/net-gre-Set-inner-mac-header-in-gro-complete.patch b/main/linux-grsec/net-gre-Set-inner-mac-header-in-gro-complete.patch
deleted file mode 100644
index df96a460c5..0000000000
--- a/main/linux-grsec/net-gre-Set-inner-mac-header-in-gro-complete.patch
+++ /dev/null
@@ -1,14 +0,0 @@
-diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
-index bb5947b..51973dd 100644
---- a/net/ipv4/gre_offload.c
-+++ b/net/ipv4/gre_offload.c
-@@ -247,6 +247,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
- err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
-
- rcu_read_unlock();
-+
-+ skb_set_inner_mac_header(skb, nhoff + grehlen);
-+
- return err;
- }
-