Diffstat (limited to 'main')
-rw-r--r--  main/dahdi-linux-grsec/APKBUILD                                |    2
-rw-r--r--  main/linux-grsec/APKBUILD                                      |   16
-rw-r--r--  main/linux-grsec/grsecurity-2.9.1-3.10.9-201308202015.patch (renamed from main/linux-grsec/grsecurity-2.9.1-3.10.7-201308171249.patch) | 1059
3 files changed, 406 insertions, 671 deletions
diff --git a/main/dahdi-linux-grsec/APKBUILD b/main/dahdi-linux-grsec/APKBUILD
index 39109296d1..9fed392379 100644
--- a/main/dahdi-linux-grsec/APKBUILD
+++ b/main/dahdi-linux-grsec/APKBUILD
@@ -3,7 +3,7 @@
_flavor=grsec
_kpkg=linux-$_flavor
-_kver=3.10.7
+_kver=3.10.9
_kpkgrel=0
_mypkgrel=0
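The two-line change above is the whole dahdi rebuild: _kver pins the out-of-tree DAHDI modules to the exact kernel they were compiled against, so every kernel bump forces a matching bump here. A minimal sketch of how a kernel-module APKBUILD typically consumes these variables (the depends/makedepends lines below are an illustrative assumption, not part of this commit):

# sketch of the usual Alpine kernel-module pattern (assumed, not from this diff)
_flavor=grsec
_kpkg=linux-$_flavor
_kver=3.10.9                 # must equal pkgver of linux-grsec
_kpkgrel=0                   # must equal pkgrel of linux-grsec
# hard-pin the module package to that exact kernel build:
depends="$_kpkg=$_kver-r$_kpkgrel"
makedepends="$_kpkg-dev=$_kver-r$_kpkgrel"

With this pinning, apk cannot upgrade linux-grsec to 3.10.9 without also pulling in a dahdi package rebuilt against it, which is why both APKBUILDs change in one commit.
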
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index 6bb238d2cf..e61309b237 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -2,7 +2,7 @@
_flavor=grsec
pkgname=linux-${_flavor}
-pkgver=3.10.7
+pkgver=3.10.9
case $pkgver in
*.*.*) _kernver=${pkgver%.*};;
*.*) _kernver=${pkgver};;
@@ -17,7 +17,7 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
- grsecurity-2.9.1-3.10.7-201308171249.patch
+ grsecurity-2.9.1-3.10.9-201308202015.patch
0001-net-inform-NETDEV_CHANGE-callbacks-which-flags-were-.patch
0002-arp-flush-arp-cache-on-IFF_NOARP-change.patch
@@ -149,8 +149,8 @@ dev() {
}
md5sums="4f25cd5bec5f8d5a7d935b3f2ccb8481 linux-3.10.tar.xz
-6b1b6b62044fcf3624f067154d5c1666 patch-3.10.7.xz
-e8a352c746da4aaf2e14a89da6896023 grsecurity-2.9.1-3.10.7-201308171249.patch
+868d7f5315f95da5e48ed56691a36263 patch-3.10.9.xz
+f41e229103719b010213ae403c1b7afa grsecurity-2.9.1-3.10.9-201308202015.patch
a16f11b12381efb3bec79b9bfb329836 0001-net-inform-NETDEV_CHANGE-callbacks-which-flags-were-.patch
656ae7b10dd2f18dbfa1011041d08d60 0002-arp-flush-arp-cache-on-IFF_NOARP-change.patch
aa454ffb96428586447775c21449e284 0003-ipv4-properly-refresh-rtable-entries-on-pmtu-redirec.patch
@@ -160,8 +160,8 @@ aa454ffb96428586447775c21449e284 0003-ipv4-properly-refresh-rtable-entries-on-p
1a111abaeb381bf47d9e979a85fba2ee kernelconfig.x86
1312267644d0c729bd7c7af979b29c8d kernelconfig.x86_64"
sha256sums="df27fa92d27a9c410bfe6c4a89f141638500d7eadcca5cce578954efc2ad3544 linux-3.10.tar.xz
-a92836d9ae477a7730c79d8ad521a2859ecdd8dea1ac0fa561fb5ce8517f5d1e patch-3.10.7.xz
-9424fb61b373fb3a84cdf0b82183ae4429158a8b582ef49a33af629557330e2a grsecurity-2.9.1-3.10.7-201308171249.patch
+851224c4719e362a8ae6785abd7a8e59aa7200ee82fffbea291003da47a64bf6 patch-3.10.9.xz
+a66590964d415c30643b25d50533c960b3dbd2f5c2e408d39f489b3b03ccefe6 grsecurity-2.9.1-3.10.9-201308202015.patch
6af3757ac36a6cd3cda7b0a71b08143726383b19261294a569ad7f4042c72df3 0001-net-inform-NETDEV_CHANGE-callbacks-which-flags-were-.patch
dc8e82108615657f1fb9d641efd42255a5761c06edde1b00a41ae0d314d548f0 0002-arp-flush-arp-cache-on-IFF_NOARP-change.patch
0985caa0f3ee8ed0959aeaa4214f5f8057ae8e61d50dcae39194912d31e14892 0003-ipv4-properly-refresh-rtable-entries-on-pmtu-redirec.patch
@@ -171,8 +171,8 @@ fc613ac466610b866b721c41836fd5bfb2d4b75bceb67972dc6369d7f62ff47e 0006-ipv4-use-
1ef74cf3703dd26201970a2d9f043fed7e03ad2540a20f810cec8add93f81ccd kernelconfig.x86
1c4b4a74d982fdc8d3baddcdaa674ae4b4a3390daba024fca55e85604af74507 kernelconfig.x86_64"
sha512sums="5fb109fcbd59bf3dffc911b853894f0a84afa75151368f783a1252c5ff60c7a1504de216c0012be446df983e2dea400ad8eeed3ce04f24dc61d0ef76c174dc35 linux-3.10.tar.xz
-d34729cfca045f12077c44518171a5b933790b112f2576aa55ba7f6684567b04a6beea4da8a635dcc078a844f9cd47aa66ead1fd6d68b926fdc09ecb0ae34324 patch-3.10.7.xz
-1ddc7f9f28e5a8451a36b6cf800e173a59cbd2271aca772b24c568b77fa37997d0bd095e032ffb94d897a5e4d9ebc102e8eb69acb04a57f1938cd92fe98e306e grsecurity-2.9.1-3.10.7-201308171249.patch
+4e1ba00bd38ce248dc9bb160a4bba5800825c2ca8665134ffd56bbfffbfbe7a6acd58bae7fbb7a5490e22393c13a86c1a7937397710c27ca14bffb40ec0d809a patch-3.10.9.xz
+1deda71bdaa5ea1133b2d6bb5c7598d31fa98ac9252ce5eca631412e5cc2be45697bfa7e1ad87a36dfd1530540c21ed528227d7b3ef611624070c9b37faddc2a grsecurity-2.9.1-3.10.9-201308202015.patch
81e78593288e8b0fd2c03ea9fc1450323887707f087e911f172450a122bc9b591ee83394836789730d951aeec13d0b75a64e1c05f04364abf8f80d883ddc4a02 0001-net-inform-NETDEV_CHANGE-callbacks-which-flags-were-.patch
51ecb15b669f6a82940a13a38939116e003bf5dfd24496771c8279e907b72adcc63d607f0340a2940d757e12ddadb7d45c7af78ae311d284935a6296dbcac00c 0002-arp-flush-arp-cache-on-IFF_NOARP-change.patch
57d0a8bd35d19cf657ded58efe24517d2252aec6984040713ba173a34edb5887ececaa2985076bc6a149eaa57639fd98a042c1c2d226ed4ad8dd5ed0e230717e 0003-ipv4-properly-refresh-rtable-entries-on-pmtu-redirec.patch
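Everything in this APKBUILD hunk set is mechanical: pkgver moves from 3.10.7 to 3.10.9, source= swaps in the renamed grsecurity patch, and the md5/sha256/sha512 blocks are regenerated for the two files that changed (patch-3.10.9.xz and the grsecurity patch). A minimal sketch of the usual aports workflow for such a bump, assuming a standard abuild setup (the patch download step is elided; abuild checksum rewrites every *sums= block from the files named in source=):

# run from main/linux-grsec/ in the aports checkout (assumed workflow)
sed -i 's/^pkgver=3\.10\.7$/pkgver=3.10.9/' APKBUILD
# replace the old grsecurity patch with the new one (download step omitted)
# and point source= at the new filename, then regenerate all checksum blocks:
abuild checksum
# verify the grsecurity patch still applies and the kernel builds:
abuild -r
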
diff --git a/main/linux-grsec/grsecurity-2.9.1-3.10.7-201308171249.patch b/main/linux-grsec/grsecurity-2.9.1-3.10.9-201308202015.patch
index 9a72c3e12c..24d81a08cf 100644
--- a/main/linux-grsec/grsecurity-2.9.1-3.10.7-201308171249.patch
+++ b/main/linux-grsec/grsecurity-2.9.1-3.10.9-201308202015.patch
@@ -281,7 +281,7 @@ index 2fe6e76..889ee23 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 33e36ab..31f1dc8 100644
+index 4b31d62..ac99d49 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -2144,33 +2144,6 @@ index f00b569..aa5bb41 100644
/*
* Change these and you break ASM code in entry-common.S
-diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
-index bdf2b84..aa9b4ac 100644
---- a/arch/arm/include/asm/tlb.h
-+++ b/arch/arm/include/asm/tlb.h
-@@ -43,6 +43,7 @@ struct mmu_gather {
- struct mm_struct *mm;
- unsigned int fullmm;
- struct vm_area_struct *vma;
-+ unsigned long start, end;
- unsigned long range_start;
- unsigned long range_end;
- unsigned int nr;
-@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
- }
-
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- tlb->mm = mm;
-- tlb->fullmm = fullmm;
-+ tlb->fullmm = !(start | (end+1));
-+ tlb->start = start;
-+ tlb->end = end;
- tlb->vma = NULL;
- tlb->max = ARRAY_SIZE(tlb->local);
- tlb->pages = tlb->local;
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7e1f760..de33b13 100644
--- a/arch/arm/include/asm/uaccess.h
@@ -2889,33 +2862,18 @@ index 07314af..c46655c 100644
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
-index d9f5cd4..e186ee1 100644
+index e19edc6..e186ee1 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
-@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
- static int
- armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
- {
-- int mapping = (*event_map)[config];
-+ int mapping;
-+
-+ if (config >= PERF_COUNT_HW_MAX)
-+ return -EINVAL;
-+
-+ mapping = (*event_map)[config];
- return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
- }
+@@ -56,7 +56,7 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+ int mapping;
-@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
- struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct pmu *leader_pmu = event->group_leader->pmu;
-
-+ if (is_software_event(event))
-+ return 1;
-+
- if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
- return 1;
+ if (config >= PERF_COUNT_HW_MAX)
+- return -ENOENT;
++ return -EINVAL;
+ mapping = (*event_map)[config];
+ return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 1f2740e..b36e225 100644
--- a/arch/arm/kernel/perf_event_cpu.c
@@ -2930,21 +2888,9 @@ index 1f2740e..b36e225 100644
};
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 5bc2615..4f1a0c2 100644
+index 5bc2615..dcd439f 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
-@@ -28,10 +28,10 @@
- #include <linux/tick.h>
- #include <linux/utsname.h>
- #include <linux/uaccess.h>
--#include <linux/random.h>
- #include <linux/hw_breakpoint.h>
- #include <linux/cpuidle.h>
- #include <linux/leds.h>
-+#include <linux/random.h>
-
- #include <asm/cacheflush.h>
- #include <asm/idmap.h>
@@ -223,6 +223,7 @@ void machine_power_off(void)
if (pm_power_off)
@@ -4543,33 +4489,6 @@ index ce6d763..cfea917 100644
extern void *samsung_dmadev_get_ops(void);
extern void *s3c_dma_get_ops(void);
-diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
-index 654f096..5546653 100644
---- a/arch/arm64/include/asm/tlb.h
-+++ b/arch/arm64/include/asm/tlb.h
-@@ -35,6 +35,7 @@ struct mmu_gather {
- struct mm_struct *mm;
- unsigned int fullmm;
- struct vm_area_struct *vma;
-+ unsigned long start, end;
- unsigned long range_start;
- unsigned long range_end;
- unsigned int nr;
-@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
- }
-
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- tlb->mm = mm;
-- tlb->fullmm = fullmm;
-+ tlb->fullmm = !(start | (end+1));
-+ tlb->start = start;
-+ tlb->end = end;
- tlb->vma = NULL;
- tlb->max = ARRAY_SIZE(tlb->local);
- tlb->pages = tlb->local;
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f4726dc..39ed646 100644
--- a/arch/arm64/kernel/debug-monitors.c
@@ -4979,45 +4898,6 @@ index 54ff557..70c88b7 100644
}
static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
-diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
-index ef3a9de..bc5efc7 100644
---- a/arch/ia64/include/asm/tlb.h
-+++ b/arch/ia64/include/asm/tlb.h
-@@ -22,7 +22,7 @@
- * unmapping a portion of the virtual address space, these hooks are called according to
- * the following template:
- *
-- * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM
-+ * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
- * {
- * for each vma that needs a shootdown do {
- * tlb_start_vma(tlb, vma);
-@@ -58,6 +58,7 @@ struct mmu_gather {
- unsigned int max;
- unsigned char fullmm; /* non-zero means full mm flush */
- unsigned char need_flush; /* really unmapped some PTEs? */
-+ unsigned long start, end;
- unsigned long start_addr;
- unsigned long end_addr;
- struct page **pages;
-@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-
-
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- tlb->mm = mm;
- tlb->max = ARRAY_SIZE(tlb->local);
- tlb->pages = tlb->local;
- tlb->nr = 0;
-- tlb->fullmm = full_mm_flush;
-+ tlb->fullmm = !(start | (end+1));
-+ tlb->start = start;
-+ tlb->end = end;
- tlb->start_addr = ~0UL;
- }
-
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 449c8c0..18965fb 100644
--- a/arch/ia64/include/asm/uaccess.h
@@ -7645,34 +7525,6 @@ index c4a93d6..4d2a9b4 100644
+#define arch_align_stack(x) ((x) & ~0xfUL)
#endif /* __ASM_EXEC_H */
-diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
-index b75d7d6..6d6d92b 100644
---- a/arch/s390/include/asm/tlb.h
-+++ b/arch/s390/include/asm/tlb.h
-@@ -32,6 +32,7 @@ struct mmu_gather {
- struct mm_struct *mm;
- struct mmu_table_batch *batch;
- unsigned int fullmm;
-+ unsigned long start, end;
- };
-
- struct mmu_table_batch {
-@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
- static inline void tlb_gather_mmu(struct mmu_gather *tlb,
- struct mm_struct *mm,
-- unsigned int full_mm_flush)
-+ unsigned long start,
-+ unsigned long end)
- {
- tlb->mm = mm;
-- tlb->fullmm = full_mm_flush;
-+ tlb->start = start;
-+ tlb->end = end;
-+ tlb->fullmm = !(start | (end+1));
- tlb->batch = NULL;
- if (tlb->fullmm)
- __tlb_flush_mm(mm);
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 9c33ed4..e40cbef 100644
--- a/arch/s390/include/asm/uaccess.h
@@ -7941,25 +7793,6 @@ index ef9e555..331bd29 100644
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
-diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
-index e61d43d..362192e 100644
---- a/arch/sh/include/asm/tlb.h
-+++ b/arch/sh/include/asm/tlb.h
-@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
- }
-
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- tlb->mm = mm;
-- tlb->fullmm = full_mm_flush;
-+ tlb->start = start;
-+ tlb->end = end;
-+ tlb->fullmm = !(start | (end+1));
-
- init_tlb_gather(tlb);
- }
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 03f2b55..b0270327 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -10595,25 +10428,6 @@ index 0032f92..cd151e0 100644
#ifdef CONFIG_64BIT
#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
-diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
-index 4febacd..29b0301 100644
---- a/arch/um/include/asm/tlb.h
-+++ b/arch/um/include/asm/tlb.h
-@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
- }
-
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- tlb->mm = mm;
-- tlb->fullmm = full_mm_flush;
-+ tlb->start = start;
-+ tlb->end = end;
-+ tlb->fullmm = !(start | (end+1));
-
- init_tlb_gather(tlb);
- }
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index bbcef52..6a2a483 100644
--- a/arch/um/kernel/process.c
@@ -15963,7 +15777,7 @@ index e642300..0ef8f31 100644
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 22224b3..c5d8d7d 100644
+index 22224b3..b3a2f90 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
@@ -16006,7 +15820,39 @@ index 22224b3..c5d8d7d 100644
#endif
#ifdef CONFIG_X86_32
unsigned long ip;
-@@ -823,11 +836,18 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -552,29 +565,8 @@ static inline void load_sp0(struct tss_struct *tss,
+ extern unsigned long mmu_cr4_features;
+ extern u32 *trampoline_cr4_features;
+
+-static inline void set_in_cr4(unsigned long mask)
+-{
+- unsigned long cr4;
+-
+- mmu_cr4_features |= mask;
+- if (trampoline_cr4_features)
+- *trampoline_cr4_features = mmu_cr4_features;
+- cr4 = read_cr4();
+- cr4 |= mask;
+- write_cr4(cr4);
+-}
+-
+-static inline void clear_in_cr4(unsigned long mask)
+-{
+- unsigned long cr4;
+-
+- mmu_cr4_features &= ~mask;
+- if (trampoline_cr4_features)
+- *trampoline_cr4_features = mmu_cr4_features;
+- cr4 = read_cr4();
+- cr4 &= ~mask;
+- write_cr4(cr4);
+-}
++extern void set_in_cr4(unsigned long mask);
++extern void clear_in_cr4(unsigned long mask);
+
+ typedef struct {
+ unsigned long seg;
+@@ -823,11 +815,18 @@ static inline void spin_lock_prefetch(const void *x)
*/
#define TASK_SIZE PAGE_OFFSET
#define TASK_SIZE_MAX TASK_SIZE
@@ -16027,7 +15873,7 @@ index 22224b3..c5d8d7d 100644
.vm86_info = NULL, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
-@@ -841,7 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -841,7 +840,7 @@ static inline void spin_lock_prefetch(const void *x)
*/
#define INIT_TSS { \
.x86_tss = { \
@@ -16036,7 +15882,7 @@ index 22224b3..c5d8d7d 100644
.ss0 = __KERNEL_DS, \
.ss1 = __KERNEL_CS, \
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
-@@ -852,11 +872,7 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -852,11 +851,7 @@ static inline void spin_lock_prefetch(const void *x)
extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
@@ -16049,7 +15895,7 @@ index 22224b3..c5d8d7d 100644
/*
* The below -8 is to reserve 8 bytes on top of the ring0 stack.
-@@ -871,7 +887,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -871,7 +866,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define task_pt_regs(task) \
({ \
struct pt_regs *__regs__; \
@@ -16058,7 +15904,7 @@ index 22224b3..c5d8d7d 100644
__regs__ - 1; \
})
-@@ -881,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -881,13 +876,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
/*
* User space process size. 47bits minus one guard page.
*/
@@ -16074,7 +15920,7 @@ index 22224b3..c5d8d7d 100644
#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-@@ -898,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -898,11 +893,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define STACK_TOP_MAX TASK_SIZE_MAX
#define INIT_THREAD { \
@@ -16088,7 +15934,7 @@ index 22224b3..c5d8d7d 100644
}
/*
-@@ -930,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+@@ -930,6 +925,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
@@ -16099,7 +15945,7 @@ index 22224b3..c5d8d7d 100644
#define KSTK_EIP(task) (task_pt_regs(task)->ip)
/* Get/set a process' ability to use the timestamp counter instruction */
-@@ -942,7 +962,8 @@ extern int set_tsc_mode(unsigned int val);
+@@ -942,7 +941,8 @@ extern int set_tsc_mode(unsigned int val);
extern u16 amd_get_nb_id(int cpu);
struct aperfmperf {
@@ -16109,7 +15955,7 @@ index 22224b3..c5d8d7d 100644
};
static inline void get_aperfmperf(struct aperfmperf *am)
-@@ -970,7 +991,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
+@@ -970,7 +970,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
return ratio;
}
@@ -16118,7 +15964,7 @@ index 22224b3..c5d8d7d 100644
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);
-@@ -980,6 +1001,6 @@ bool xen_set_default_idle(void);
+@@ -980,6 +980,6 @@ bool xen_set_default_idle(void);
#define xen_set_default_idle 0
#endif
@@ -16960,31 +16806,31 @@ index a1df6e8..e002940 100644
#endif
#endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
-index 50a7fc0..d00c622 100644
+index 50a7fc0..7c437a7 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
-@@ -17,18 +17,39 @@
+@@ -17,18 +17,40 @@
static inline void __native_flush_tlb(void)
{
-- native_write_cr3(native_read_cr3());
++ if (static_cpu_has(X86_FEATURE_INVPCID)) {
++ unsigned long descriptor[2];
++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
++ return;
++ }
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+ if (static_cpu_has(X86_FEATURE_PCID)) {
+ unsigned int cpu = raw_get_cpu();
+
-+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
-+ unsigned long descriptor[2];
-+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
-+ } else {
-+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
-+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
-+ }
++ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
++ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
+ raw_put_cpu_no_resched();
-+ } else
++ return;
++ }
+#endif
+
-+ native_write_cr3(native_read_cr3());
+ native_write_cr3(native_read_cr3());
}
static inline void __native_flush_tlb_global_irq_disabled(void)
@@ -17010,41 +16856,49 @@ index 50a7fc0..d00c622 100644
}
static inline void __native_flush_tlb_global(void)
-@@ -49,7 +70,33 @@ static inline void __native_flush_tlb_global(void)
+@@ -49,6 +71,42 @@ static inline void __native_flush_tlb_global(void)
static inline void __native_flush_tlb_single(unsigned long addr)
{
-- asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ if (static_cpu_has(X86_FEATURE_PCID) && addr < TASK_SIZE_MAX) {
-+ unsigned int cpu = raw_get_cpu();
++ if (static_cpu_has(X86_FEATURE_INVPCID)) {
++ unsigned long descriptor[2];
+
-+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
-+ unsigned long descriptor[2];
-+ descriptor[0] = PCID_USER;
-+ descriptor[1] = addr;
++ descriptor[0] = PCID_KERNEL;
++ descriptor[1] = addr;
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
++ if (addr < TASK_SIZE_MAX)
++ descriptor[1] += pax_user_shadow_base;
+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
-+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
-+ descriptor[0] = PCID_KERNEL;
-+ descriptor[1] = addr + pax_user_shadow_base;
-+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
-+ }
-+ } else {
-+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
-+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
-+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
-+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF))
-+ asm volatile("invlpg (%0)" ::"r" (addr + pax_user_shadow_base) : "memory");
+ }
-+ raw_put_cpu_no_resched();
-+ } else
++
++ descriptor[0] = PCID_USER;
++ descriptor[1] = addr;
+#endif
+
++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
++ return;
++ }
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if (static_cpu_has(X86_FEATURE_PCID)) {
++ unsigned int cpu = raw_get_cpu();
++
++ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
++ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
++ raw_put_cpu_no_resched();
++
++ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
++ addr += pax_user_shadow_base;
++ }
++#endif
++
+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
- static inline void __flush_tlb_all(void)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 5ee2687..74590b9 100644
--- a/arch/x86/include/asm/uaccess.h
@@ -18604,7 +18458,7 @@ index 5013a48..0782c53 100644
if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 22018f7..a5883af 100644
+index 22018f7..df77e23 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
@@ -18668,48 +18522,65 @@ index 22018f7..a5883af 100644
static int __init x86_xsave_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-@@ -288,6 +234,40 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+@@ -288,6 +234,57 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
set_in_cr4(X86_CR4_SMAP);
}
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#ifdef CONFIG_X86_64
+static __init int setup_disable_pcid(char *arg)
+{
+ setup_clear_cpu_cap(X86_FEATURE_PCID);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
+ if (clone_pgd_mask != ~(pgdval_t)0UL)
+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++#endif
++
+ return 1;
+}
+__setup("nopcid", setup_disable_pcid);
+
+static void setup_pcid(struct cpuinfo_x86 *c)
+{
-+ if (cpu_has(c, X86_FEATURE_PCID))
-+ printk("PAX: PCID detected\n");
++ if (!cpu_has(c, X86_FEATURE_PCID)) {
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (clone_pgd_mask != ~(pgdval_t)0UL) {
++ pax_open_kernel();
++ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++ pax_close_kernel();
++ printk("PAX: slow and weak UDEREF enabled\n");
++ } else
++ printk("PAX: UDEREF disabled\n");
++#endif
++
++ return;
++ }
++
++ printk("PAX: PCID detected\n");
++ set_in_cr4(X86_CR4_PCIDE);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_open_kernel();
++ clone_pgd_mask = ~(pgdval_t)0UL;
++ pax_close_kernel();
++ if (pax_user_shadow_base)
++ printk("PAX: weak UDEREF enabled\n");
++ else {
++ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
++ printk("PAX: strong UDEREF enabled\n");
++ }
++#endif
+
+ if (cpu_has(c, X86_FEATURE_INVPCID))
+ printk("PAX: INVPCID detected\n");
-+
-+ if (cpu_has(c, X86_FEATURE_PCID)) {
-+ set_in_cr4(X86_CR4_PCIDE);
-+ clone_pgd_mask = ~(pgdval_t)0UL;
-+ if (pax_user_shadow_base)
-+ printk("PAX: weak UDEREF enabled\n");
-+ else {
-+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
-+ printk("PAX: strong UDEREF enabled\n");
-+ }
-+ } else if (pax_user_shadow_base)
-+ printk("PAX: slow and weak UDEREF enabled\n");
-+ else
-+ printk("PAX: UDEREF disabled\n");
+}
+#endif
+
/*
* Some CPU features depend on higher CPUID levels, which may not always
* be available due to CPUID level capping or broken virtualization
-@@ -386,7 +366,7 @@ void switch_to_new_gdt(int cpu)
+@@ -386,7 +383,7 @@ void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
@@ -18718,18 +18589,18 @@ index 22018f7..a5883af 100644
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
/* Reload the per-cpu base */
-@@ -874,6 +854,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+@@ -874,6 +871,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c);
setup_smap(c);
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#ifdef CONFIG_X86_64
+ setup_pcid(c);
+#endif
+
/*
* The vendor-specific functions might have changed features.
* Now we do "generic changes."
-@@ -882,6 +866,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+@@ -882,6 +883,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
/* Filter out anything that depends on CPUID levels we don't have */
filter_cpuid_features(c, true);
@@ -18740,7 +18611,7 @@ index 22018f7..a5883af 100644
/* If the model name is still unset, do table lookup. */
if (!c->x86_model_id[0]) {
const char *p;
-@@ -1069,10 +1057,12 @@ static __init int setup_disablecpuid(char *arg)
+@@ -1069,10 +1074,12 @@ static __init int setup_disablecpuid(char *arg)
}
__setup("clearcpuid=", setup_disablecpuid);
@@ -18755,7 +18626,7 @@ index 22018f7..a5883af 100644
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE);
-@@ -1086,7 +1076,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
+@@ -1086,7 +1093,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(unsigned long, kernel_stack) =
@@ -18764,7 +18635,7 @@ index 22018f7..a5883af 100644
EXPORT_PER_CPU_SYMBOL(kernel_stack);
DEFINE_PER_CPU(char *, irq_stack_ptr) =
-@@ -1231,7 +1221,7 @@ void __cpuinit cpu_init(void)
+@@ -1231,7 +1238,7 @@ void __cpuinit cpu_init(void)
load_ucode_ap();
cpu = stack_smp_processor_id();
@@ -18773,7 +18644,7 @@ index 22018f7..a5883af 100644
oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
-@@ -1257,7 +1247,7 @@ void __cpuinit cpu_init(void)
+@@ -1257,7 +1264,7 @@ void __cpuinit cpu_init(void)
switch_to_new_gdt(cpu);
loadsegment(fs, 0);
@@ -18782,7 +18653,7 @@ index 22018f7..a5883af 100644
memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
syscall_init();
-@@ -1266,7 +1256,6 @@ void __cpuinit cpu_init(void)
+@@ -1266,7 +1273,6 @@ void __cpuinit cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();
@@ -18790,7 +18661,7 @@ index 22018f7..a5883af 100644
enable_x2apic();
/*
-@@ -1318,7 +1307,7 @@ void __cpuinit cpu_init(void)
+@@ -1318,7 +1324,7 @@ void __cpuinit cpu_init(void)
{
int cpu = smp_processor_id();
struct task_struct *curr = current;
@@ -19193,7 +19064,7 @@ index a9e2207..d70c83a 100644
intel_ds_init();
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
-index 52441a2..f94fae8 100644
+index 8aac56b..588fb13 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -3093,7 +3093,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
@@ -20461,7 +20332,7 @@ index 8f3e2de..6b71e39 100644
/*
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 7272089..833fdf8 100644
+index 7272089..0b74104 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -59,6 +59,8 @@
@@ -20548,7 +20419,7 @@ index 7272089..833fdf8 100644
#endif
-@@ -284,6 +293,427 @@ ENTRY(native_usergs_sysret64)
+@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
@@ -20738,10 +20609,11 @@ index 7272089..833fdf8 100644
+ .popsection
+ GET_CR3_INTO_RDI
+ cmp $1,%dil
-+ jnz 3f
++ jnz 4f
+ sub $4097,%rdi
+ bts $63,%rdi
-+ jmp 2f
++ SET_RDI_INTO_CR3
++ jmp 3f
+111:
+
+ GET_CR3_INTO_RDI
@@ -20772,13 +20644,15 @@ index 7272089..833fdf8 100644
+ i = i + 1
+ .endr
+
++2: SET_RDI_INTO_CR3
++
+#ifdef CONFIG_PAX_KERNEXEC
+ GET_CR0_INTO_RDI
+ bts $16,%rdi
+ SET_RDI_INTO_CR0
+#endif
+
-+2: SET_RDI_INTO_CR3
++3:
+
+#ifdef CONFIG_PARAVIRT
+ PV_RESTORE_REGS(CLBR_RDI)
@@ -20788,7 +20662,7 @@ index 7272089..833fdf8 100644
+ popq %rdi
+ pax_force_retaddr
+ retq
-+3: ud2
++4: ud2
+ENDPROC(pax_enter_kernel_user)
+
+ENTRY(pax_exit_kernel_user)
@@ -20814,14 +20688,22 @@ index 7272089..833fdf8 100644
+ SET_RDI_INTO_CR3
+ jmp 2f
+1:
++
+ mov %rdi,%rbx
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ btr $16,%rdi
++ jnc 3f
++ SET_RDI_INTO_CR0
++#endif
++
+ add $__START_KERNEL_map,%rbx
+ sub phys_base(%rip),%rbx
+
+#ifdef CONFIG_PARAVIRT
+ cmpl $0, pv_info+PARAVIRT_enabled
+ jz 1f
-+ pushq %rdi
+ i = 0
+ .rept USER_PGD_PTRS
+ mov i*8(%rbx),%rsi
@@ -20830,18 +20712,10 @@ index 7272089..833fdf8 100644
+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
+ i = i + 1
+ .endr
-+ popq %rdi
+ jmp 2f
+1:
+#endif
+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ GET_CR0_INTO_RDI
-+ btr $16,%rdi
-+ jnc 3f
-+ SET_RDI_INTO_CR0
-+#endif
-+
+ i = 0
+ .rept USER_PGD_PTRS
+ movb $0x67,i*8(%rbx)
@@ -20976,7 +20850,7 @@ index 7272089..833fdf8 100644
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
-@@ -375,8 +805,8 @@ ENDPROC(native_usergs_sysret64)
+@@ -375,8 +808,8 @@ ENDPROC(native_usergs_sysret64)
.endm
.macro UNFAKE_STACK_FRAME
@@ -20987,7 +20861,7 @@ index 7272089..833fdf8 100644
.endm
/*
-@@ -463,7 +893,7 @@ ENDPROC(native_usergs_sysret64)
+@@ -463,7 +896,7 @@ ENDPROC(native_usergs_sysret64)
movq %rsp, %rsi
leaq -RBP(%rsp),%rdi /* arg1 for handler */
@@ -20996,7 +20870,7 @@ index 7272089..833fdf8 100644
je 1f
SWAPGS
/*
-@@ -498,9 +928,10 @@ ENTRY(save_rest)
+@@ -498,9 +931,10 @@ ENTRY(save_rest)
movq_cfi r15, R15+16
movq %r11, 8(%rsp) /* return address */
FIXUP_TOP_OF_STACK %r11, 16
@@ -21008,7 +20882,7 @@ index 7272089..833fdf8 100644
/* save complete stack frame */
.pushsection .kprobes.text, "ax"
-@@ -529,9 +960,10 @@ ENTRY(save_paranoid)
+@@ -529,9 +963,10 @@ ENTRY(save_paranoid)
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx,%ebx
@@ -21021,7 +20895,7 @@ index 7272089..833fdf8 100644
.popsection
/*
-@@ -553,7 +985,7 @@ ENTRY(ret_from_fork)
+@@ -553,7 +988,7 @@ ENTRY(ret_from_fork)
RESTORE_REST
@@ -21030,7 +20904,7 @@ index 7272089..833fdf8 100644
jz 1f
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
-@@ -571,7 +1003,7 @@ ENTRY(ret_from_fork)
+@@ -571,7 +1006,7 @@ ENTRY(ret_from_fork)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -21039,7 +20913,7 @@ index 7272089..833fdf8 100644
/*
* System call entry. Up to 6 arguments in registers are supported.
-@@ -608,7 +1040,7 @@ END(ret_from_fork)
+@@ -608,7 +1043,7 @@ END(ret_from_fork)
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
@@ -21048,7 +20922,7 @@ index 7272089..833fdf8 100644
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
-@@ -621,16 +1053,23 @@ GLOBAL(system_call_after_swapgs)
+@@ -621,16 +1056,23 @@ GLOBAL(system_call_after_swapgs)
movq %rsp,PER_CPU_VAR(old_rsp)
movq PER_CPU_VAR(kernel_stack),%rsp
@@ -21074,7 +20948,7 @@ index 7272089..833fdf8 100644
jnz tracesys
system_call_fastpath:
#if __SYSCALL_MASK == ~0
-@@ -640,7 +1079,7 @@ system_call_fastpath:
+@@ -640,7 +1082,7 @@ system_call_fastpath:
cmpl $__NR_syscall_max,%eax
#endif
ja badsys
@@ -21083,7 +20957,7 @@ index 7272089..833fdf8 100644
call *sys_call_table(,%rax,8) # XXX: rip relative
movq %rax,RAX-ARGOFFSET(%rsp)
/*
-@@ -654,10 +1093,13 @@ sysret_check:
+@@ -654,10 +1096,13 @@ sysret_check:
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -21098,7 +20972,7 @@ index 7272089..833fdf8 100644
/*
* sysretq will re-enable interrupts:
*/
-@@ -709,14 +1151,18 @@ badsys:
+@@ -709,14 +1154,18 @@ badsys:
* jump back to the normal fast path.
*/
auditsys:
@@ -21118,7 +20992,7 @@ index 7272089..833fdf8 100644
jmp system_call_fastpath
/*
-@@ -737,7 +1183,7 @@ sysret_audit:
+@@ -737,7 +1186,7 @@ sysret_audit:
/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -21127,7 +21001,7 @@ index 7272089..833fdf8 100644
jz auditsys
#endif
SAVE_REST
-@@ -745,12 +1191,16 @@ tracesys:
+@@ -745,12 +1194,16 @@ tracesys:
FIXUP_TOP_OF_STACK %rdi
movq %rsp,%rdi
call syscall_trace_enter
@@ -21144,7 +21018,7 @@ index 7272089..833fdf8 100644
RESTORE_REST
#if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max,%rax
-@@ -759,7 +1209,7 @@ tracesys:
+@@ -759,7 +1212,7 @@ tracesys:
cmpl $__NR_syscall_max,%eax
#endif
ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
@@ -21153,7 +21027,7 @@ index 7272089..833fdf8 100644
call *sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
/* Use IRET because user could have changed frame */
-@@ -780,7 +1230,9 @@ GLOBAL(int_with_check)
+@@ -780,7 +1233,9 @@ GLOBAL(int_with_check)
andl %edi,%edx
jnz int_careful
andl $~TS_COMPAT,TI_status(%rcx)
@@ -21164,7 +21038,7 @@ index 7272089..833fdf8 100644
/* Either reschedule or signal or syscall exit tracking needed. */
/* First do a reschedule test. */
-@@ -826,7 +1278,7 @@ int_restore_rest:
+@@ -826,7 +1281,7 @@ int_restore_rest:
TRACE_IRQS_OFF
jmp int_with_check
CFI_ENDPROC
@@ -21173,7 +21047,7 @@ index 7272089..833fdf8 100644
.macro FORK_LIKE func
ENTRY(stub_\func)
-@@ -839,9 +1291,10 @@ ENTRY(stub_\func)
+@@ -839,9 +1294,10 @@ ENTRY(stub_\func)
DEFAULT_FRAME 0 8 /* offset 8: return address */
call sys_\func
RESTORE_TOP_OF_STACK %r11, 8
@@ -21185,7 +21059,7 @@ index 7272089..833fdf8 100644
.endm
.macro FIXED_FRAME label,func
-@@ -851,9 +1304,10 @@ ENTRY(\label)
+@@ -851,9 +1307,10 @@ ENTRY(\label)
FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
call \func
RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
@@ -21197,7 +21071,7 @@ index 7272089..833fdf8 100644
.endm
FORK_LIKE clone
-@@ -870,9 +1324,10 @@ ENTRY(ptregscall_common)
+@@ -870,9 +1327,10 @@ ENTRY(ptregscall_common)
movq_cfi_restore R12+8, r12
movq_cfi_restore RBP+8, rbp
movq_cfi_restore RBX+8, rbx
@@ -21209,7 +21083,7 @@ index 7272089..833fdf8 100644
ENTRY(stub_execve)
CFI_STARTPROC
-@@ -885,7 +1340,7 @@ ENTRY(stub_execve)
+@@ -885,7 +1343,7 @@ ENTRY(stub_execve)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -21218,7 +21092,7 @@ index 7272089..833fdf8 100644
/*
* sigreturn is special because it needs to restore all registers on return.
-@@ -902,7 +1357,7 @@ ENTRY(stub_rt_sigreturn)
+@@ -902,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -21227,7 +21101,7 @@ index 7272089..833fdf8 100644
#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
-@@ -916,7 +1371,7 @@ ENTRY(stub_x32_rt_sigreturn)
+@@ -916,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -21236,7 +21110,7 @@ index 7272089..833fdf8 100644
ENTRY(stub_x32_execve)
CFI_STARTPROC
-@@ -930,7 +1385,7 @@ ENTRY(stub_x32_execve)
+@@ -930,7 +1388,7 @@ ENTRY(stub_x32_execve)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -21245,7 +21119,7 @@ index 7272089..833fdf8 100644
#endif
-@@ -967,7 +1422,7 @@ vector=vector+1
+@@ -967,7 +1425,7 @@ vector=vector+1
2: jmp common_interrupt
.endr
CFI_ENDPROC
@@ -21254,7 +21128,7 @@ index 7272089..833fdf8 100644
.previous
END(interrupt)
-@@ -987,6 +1442,16 @@ END(interrupt)
+@@ -987,6 +1445,16 @@ END(interrupt)
subq $ORIG_RAX-RBP, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
SAVE_ARGS_IRQ
@@ -21271,7 +21145,7 @@ index 7272089..833fdf8 100644
call \func
.endm
-@@ -1019,7 +1484,7 @@ ret_from_intr:
+@@ -1019,7 +1487,7 @@ ret_from_intr:
exit_intr:
GET_THREAD_INFO(%rcx)
@@ -21280,7 +21154,7 @@ index 7272089..833fdf8 100644
je retint_kernel
/* Interrupt came from user space */
-@@ -1041,12 +1506,16 @@ retint_swapgs: /* return to user-space */
+@@ -1041,12 +1509,16 @@ retint_swapgs: /* return to user-space */
* The iretq could re-enable interrupts:
*/
DISABLE_INTERRUPTS(CLBR_ANY)
@@ -21297,7 +21171,7 @@ index 7272089..833fdf8 100644
/*
* The iretq could re-enable interrupts:
*/
-@@ -1129,7 +1598,7 @@ ENTRY(retint_kernel)
+@@ -1129,7 +1601,7 @@ ENTRY(retint_kernel)
#endif
CFI_ENDPROC
@@ -21306,7 +21180,7 @@ index 7272089..833fdf8 100644
/*
* End of kprobes section
*/
-@@ -1147,7 +1616,7 @@ ENTRY(\sym)
+@@ -1147,7 +1619,7 @@ ENTRY(\sym)
interrupt \do_sym
jmp ret_from_intr
CFI_ENDPROC
@@ -21315,7 +21189,7 @@ index 7272089..833fdf8 100644
.endm
#ifdef CONFIG_SMP
-@@ -1208,12 +1677,22 @@ ENTRY(\sym)
+@@ -1208,12 +1680,22 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
@@ -21339,7 +21213,7 @@ index 7272089..833fdf8 100644
.endm
.macro paranoidzeroentry sym do_sym
-@@ -1226,15 +1705,25 @@ ENTRY(\sym)
+@@ -1226,15 +1708,25 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF
@@ -21367,7 +21241,7 @@ index 7272089..833fdf8 100644
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
INTR_FRAME
-@@ -1245,14 +1734,30 @@ ENTRY(\sym)
+@@ -1245,14 +1737,30 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF_DEBUG
@@ -21399,7 +21273,7 @@ index 7272089..833fdf8 100644
.endm
.macro errorentry sym do_sym
-@@ -1264,13 +1769,23 @@ ENTRY(\sym)
+@@ -1264,13 +1772,23 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
@@ -21424,7 +21298,7 @@ index 7272089..833fdf8 100644
.endm
/* error code is on the stack already */
-@@ -1284,13 +1799,23 @@ ENTRY(\sym)
+@@ -1284,13 +1802,23 @@ ENTRY(\sym)
call save_paranoid
DEFAULT_FRAME 0
TRACE_IRQS_OFF
@@ -21449,7 +21323,7 @@ index 7272089..833fdf8 100644
.endm
zeroentry divide_error do_divide_error
-@@ -1320,9 +1845,10 @@ gs_change:
+@@ -1320,9 +1848,10 @@ gs_change:
2: mfence /* workaround */
SWAPGS
popfq_cfi
@@ -21461,7 +21335,7 @@ index 7272089..833fdf8 100644
_ASM_EXTABLE(gs_change,bad_gs)
.section .fixup,"ax"
-@@ -1350,9 +1876,10 @@ ENTRY(call_softirq)
+@@ -1350,9 +1879,10 @@ ENTRY(call_softirq)
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count)
@@ -21473,7 +21347,7 @@ index 7272089..833fdf8 100644
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
-@@ -1390,7 +1917,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+@@ -1390,7 +1920,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
decl PER_CPU_VAR(irq_count)
jmp error_exit
CFI_ENDPROC
@@ -21482,7 +21356,7 @@ index 7272089..833fdf8 100644
/*
* Hypervisor uses this for application faults while it executes.
-@@ -1449,7 +1976,7 @@ ENTRY(xen_failsafe_callback)
+@@ -1449,7 +1979,7 @@ ENTRY(xen_failsafe_callback)
SAVE_ALL
jmp error_exit
CFI_ENDPROC
@@ -21491,7 +21365,7 @@ index 7272089..833fdf8 100644
apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -1501,18 +2028,33 @@ ENTRY(paranoid_exit)
+@@ -1501,18 +2031,33 @@ ENTRY(paranoid_exit)
DEFAULT_FRAME
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF_DEBUG
@@ -21527,7 +21401,7 @@ index 7272089..833fdf8 100644
jmp irq_return
paranoid_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1541,7 +2083,7 @@ paranoid_schedule:
+@@ -1541,7 +2086,7 @@ paranoid_schedule:
TRACE_IRQS_OFF
jmp paranoid_userspace
CFI_ENDPROC
@@ -21536,7 +21410,7 @@ index 7272089..833fdf8 100644
/*
* Exception entry point. This expects an error code/orig_rax on the stack.
-@@ -1568,12 +2110,13 @@ ENTRY(error_entry)
+@@ -1568,12 +2113,13 @@ ENTRY(error_entry)
movq_cfi r14, R14+8
movq_cfi r15, R15+8
xorl %ebx,%ebx
@@ -21551,7 +21425,7 @@ index 7272089..833fdf8 100644
ret
/*
-@@ -1600,7 +2143,7 @@ bstep_iret:
+@@ -1600,7 +2146,7 @@ bstep_iret:
movq %rcx,RIP+8(%rsp)
jmp error_swapgs
CFI_ENDPROC
@@ -21560,7 +21434,7 @@ index 7272089..833fdf8 100644
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
-@@ -1611,7 +2154,7 @@ ENTRY(error_exit)
+@@ -1611,7 +2157,7 @@ ENTRY(error_exit)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
GET_THREAD_INFO(%rcx)
@@ -21569,7 +21443,7 @@ index 7272089..833fdf8 100644
jne retint_kernel
LOCKDEP_SYS_EXIT_IRQ
movl TI_flags(%rcx),%edx
-@@ -1620,7 +2163,7 @@ ENTRY(error_exit)
+@@ -1620,7 +2166,7 @@ ENTRY(error_exit)
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
@@ -21578,7 +21452,7 @@ index 7272089..833fdf8 100644
/*
* Test if a given stack is an NMI stack or not.
-@@ -1678,9 +2221,11 @@ ENTRY(nmi)
+@@ -1678,9 +2224,11 @@ ENTRY(nmi)
* If %cs was not the kernel segment, then the NMI triggered in user
* space, which means it is definitely not nested.
*/
@@ -21591,7 +21465,7 @@ index 7272089..833fdf8 100644
/*
* Check the special variable on the stack to see if NMIs are
* executing.
-@@ -1714,8 +2259,7 @@ nested_nmi:
+@@ -1714,8 +2262,7 @@ nested_nmi:
1:
/* Set up the interrupted NMIs stack to jump to repeat_nmi */
@@ -21601,7 +21475,7 @@ index 7272089..833fdf8 100644
CFI_ADJUST_CFA_OFFSET 1*8
leaq -10*8(%rsp), %rdx
pushq_cfi $__KERNEL_DS
-@@ -1733,6 +2277,7 @@ nested_nmi_out:
+@@ -1733,6 +2280,7 @@ nested_nmi_out:
CFI_RESTORE rdx
/* No need to check faults here */
@@ -21609,7 +21483,7 @@ index 7272089..833fdf8 100644
INTERRUPT_RETURN
CFI_RESTORE_STATE
-@@ -1849,6 +2394,8 @@ end_repeat_nmi:
+@@ -1849,6 +2397,8 @@ end_repeat_nmi:
*/
movq %cr2, %r12
@@ -21618,7 +21492,7 @@ index 7272089..833fdf8 100644
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
movq $-1,%rsi
-@@ -1861,26 +2408,31 @@ end_repeat_nmi:
+@@ -1861,26 +2411,31 @@ end_repeat_nmi:
movq %r12, %cr2
1:
@@ -22473,7 +22347,7 @@ index a836860..1b5c665 100644
- .skip PAGE_SIZE
+ .fill 512,8,0
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
-index 0fa6912..37fce70 100644
+index 0fa6912..b37438b 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
@@ -22489,7 +22363,7 @@ index 0fa6912..37fce70 100644
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
-@@ -37,3 +41,7 @@ EXPORT_SYMBOL(strstr);
+@@ -37,3 +41,11 @@ EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(empty_zero_page);
@@ -22497,6 +22371,10 @@ index 0fa6912..37fce70 100644
+#ifdef CONFIG_PAX_KERNEXEC
+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
+#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++EXPORT_SYMBOL(cpu_pgd);
++#endif
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index f7ea30d..6318acc 100644
--- a/arch/x86/kernel/i387.c
@@ -24248,7 +24126,7 @@ index f2bb9c9..bed145d7 100644
1:
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index 56f7fcf..3b88ad1 100644
+index 56f7fcf..2cfe4f1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -110,6 +110,7 @@
@@ -24259,7 +24137,7 @@ index 56f7fcf..3b88ad1 100644
/*
* max_low_pfn_mapped: highest direct mapped pfn under 4GB
-@@ -205,10 +206,12 @@ EXPORT_SYMBOL(boot_cpu_data);
+@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
#endif
@@ -24274,8 +24152,46 @@ index 56f7fcf..3b88ad1 100644
+unsigned long mmu_cr4_features __read_only;
#endif
++void set_in_cr4(unsigned long mask)
++{
++ unsigned long cr4 = read_cr4();
++
++ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
++ return;
++
++ pax_open_kernel();
++ mmu_cr4_features |= mask;
++ pax_close_kernel();
++
++ if (trampoline_cr4_features)
++ *trampoline_cr4_features = mmu_cr4_features;
++ cr4 |= mask;
++ write_cr4(cr4);
++}
++EXPORT_SYMBOL(set_in_cr4);
++
++void clear_in_cr4(unsigned long mask)
++{
++ unsigned long cr4 = read_cr4();
++
++ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
++ return;
++
++ pax_open_kernel();
++ mmu_cr4_features &= ~mask;
++ pax_close_kernel();
++
++ if (trampoline_cr4_features)
++ *trampoline_cr4_features = mmu_cr4_features;
++ cr4 &= ~mask;
++ write_cr4(cr4);
++}
++EXPORT_SYMBOL(clear_in_cr4);
++
/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
-@@ -444,7 +447,7 @@ static void __init parse_setup_data(void)
+ int bootloader_type, bootloader_version;
+
+@@ -444,7 +483,7 @@ static void __init parse_setup_data(void)
switch (data->type) {
case SETUP_E820_EXT:
@@ -24284,7 +24200,7 @@ index 56f7fcf..3b88ad1 100644
break;
case SETUP_DTB:
add_dtb(pa_data);
-@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
+@@ -771,7 +810,7 @@ static void __init trim_bios_range(void)
* area (640->1Mb) as ram even though it is not.
* take them out.
*/
@@ -24293,7 +24209,7 @@ index 56f7fcf..3b88ad1 100644
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
-@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
+@@ -779,7 +818,7 @@ static void __init trim_bios_range(void)
/* called before trim_bios_range() to spare extra sanitize */
static void __init e820_add_kernel_range(void)
{
@@ -24302,7 +24218,7 @@ index 56f7fcf..3b88ad1 100644
u64 size = __pa_symbol(_end) - start;
/*
-@@ -841,8 +844,12 @@ static void __init trim_low_memory_range(void)
+@@ -841,8 +880,12 @@ static void __init trim_low_memory_range(void)
void __init setup_arch(char **cmdline_p)
{
@@ -24315,7 +24231,7 @@ index 56f7fcf..3b88ad1 100644
early_reserve_initrd();
-@@ -934,14 +941,14 @@ void __init setup_arch(char **cmdline_p)
+@@ -934,14 +977,14 @@ void __init setup_arch(char **cmdline_p)
if (!boot_params.hdr.root_flags)
root_mountflags &= ~MS_RDONLY;
@@ -24516,7 +24432,7 @@ index 48d2b7d..90d328a 100644
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
-index bfd348e..f0c1bf2 100644
+index bfd348e..914f323 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -251,14 +251,18 @@ notrace static void __cpuinit start_secondary(void *unused)
@@ -24569,7 +24485,7 @@ index bfd348e..f0c1bf2 100644
initial_code = (unsigned long)start_secondary;
stack_start = idle->thread.sp;
-@@ -908,6 +915,18 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+@@ -908,6 +915,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
/* the FPU context is blank, nobody can own it */
__cpu_disable_lazy_restore(cpu);
@@ -24582,9 +24498,6 @@ index bfd348e..f0c1bf2 100644
+ KERNEL_PGD_PTRS);
+#endif
+
-+ /* the FPU context is blank, nobody can own it */
-+ __cpu_disable_lazy_restore(cpu);
-+
err = do_boot_cpu(apicid, cpu, tidle);
if (err) {
pr_debug("do_boot_cpu failed %d\n", err);
@@ -24821,7 +24734,7 @@ index 0000000..5877189
+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+}
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
-index dbded5a..ace2781 100644
+index 48f8375..ace2781 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -81,8 +81,8 @@ out:
@@ -24839,7 +24752,7 @@ index dbded5a..ace2781 100644
*begin = new_begin;
}
} else {
-- *begin = TASK_UNMAPPED_BASE;
+- *begin = mmap_legacy_base();
+ *begin = mm->mmap_base;
*end = TASK_SIZE;
}
@@ -25600,7 +25513,7 @@ index 9a907a6..f83f921 100644
(unsigned long)VSYSCALL_START);
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
-index b014d94..6d6ca7b 100644
+index b014d94..e775258 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
@@ -25612,6 +25525,14 @@ index b014d94..6d6ca7b 100644
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
+@@ -66,3 +64,7 @@ EXPORT_SYMBOL(empty_zero_page);
+ #ifndef CONFIG_PARAVIRT
+ EXPORT_SYMBOL(native_load_gs_index);
+ #endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++EXPORT_SYMBOL(cpu_pgd);
++#endif
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 45a14db..075bb9b 100644
--- a/arch/x86/kernel/x86_init.c
@@ -30647,7 +30568,7 @@ index d87dd6d..bf3fa66 100644
pte = kmemcheck_pte_lookup(address);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
-index 845df68..1d8d29f 100644
+index c1af323..4758dad 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
@@ -30687,8 +30608,8 @@ index 845df68..1d8d29f 100644
* Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
* does, but not when emulating X86_32
*/
--static unsigned long mmap_legacy_base(void)
-+static unsigned long mmap_legacy_base(struct mm_struct *mm)
+-unsigned long mmap_legacy_base(void)
++unsigned long mmap_legacy_base(struct mm_struct *mm)
{
- if (mmap_is_ia32())
+ if (mmap_is_ia32()) {
@@ -30705,7 +30626,7 @@ index 845df68..1d8d29f 100644
return TASK_UNMAPPED_BASE + mmap_rnd();
}
-@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
+@@ -113,11 +126,23 @@ unsigned long mmap_legacy_base(void)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
if (mmap_is_legacy()) {
@@ -41951,19 +41872,6 @@ index f975696..4597e21 100644
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
-diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
-index 25723d8..925ab8e 100644
---- a/drivers/net/can/usb/peak_usb/pcan_usb.c
-+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
-@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
- if ((mc->ptr + rec_len) > mc->end)
- goto decode_failed;
-
-- memcpy(cf->data, mc->ptr, rec_len);
-+ memcpy(cf->data, mc->ptr, cf->can_dlc);
- mc->ptr += rec_len;
- }
-
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e1d2643..7f4133b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
@@ -45276,45 +45184,25 @@ index c699a30..b90a5fd 100644
pDevice->apdev->netdev_ops = &apdev_netdev_ops;
pDevice->apdev->type = ARPHRD_IEEE80211;
-diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
-index d7e51e4..d07eaab 100644
---- a/drivers/staging/zcache/tmem.c
-+++ b/drivers/staging/zcache/tmem.c
-@@ -51,7 +51,7 @@
- * A tmem host implementation must use this function to register callbacks
- * for memory allocation.
- */
--static struct tmem_hostops tmem_hostops;
-+static tmem_hostops_no_const tmem_hostops;
-
- static void tmem_objnode_tree_init(void);
-
-@@ -65,7 +65,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
- * A tmem host implementation must use this function to register
- * callbacks for a page-accessible memory (PAM) implementation.
- */
--static struct tmem_pamops tmem_pamops;
-+static tmem_pamops_no_const tmem_pamops;
-
- void tmem_register_pamops(struct tmem_pamops *m)
- {
diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
-index d128ce2..a43980c 100644
+index d128ce2..fc1f9a1 100644
--- a/drivers/staging/zcache/tmem.h
+++ b/drivers/staging/zcache/tmem.h
-@@ -226,6 +226,7 @@ struct tmem_pamops {
+@@ -225,7 +225,7 @@ struct tmem_pamops {
+ bool (*is_remote)(void *);
int (*replace_in_obj)(void *, struct tmem_obj *);
#endif
- };
-+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
+-};
++} __no_const;
extern void tmem_register_pamops(struct tmem_pamops *m);
/* memory allocation methods provided by the host implementation */
-@@ -235,6 +236,7 @@ struct tmem_hostops {
+@@ -234,7 +234,7 @@ struct tmem_hostops {
+ void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
- };
-+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
+-};
++} __no_const;
extern void tmem_register_hostops(struct tmem_hostops *m);
/* core tmem accessor functions */
@@ -46983,7 +46871,7 @@ index d6bea3e..60b250e 100644
/**
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
-index 6ef94bc..1b41265 100644
+index 028fc83..65bb105 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -296,7 +296,7 @@ out:
@@ -51934,6 +51822,28 @@ index f02d82b..2632cf86 100644
int err;
u32 ftype;
struct ceph_mds_reply_info_parsed *rinfo;
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 7d377c9..3fb6559 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -839,7 +839,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
+ /*
+ * construct our own bdi so we can control readahead, etc.
+ */
+-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
++static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
+
+ static int ceph_register_bdi(struct super_block *sb,
+ struct ceph_fs_client *fsc)
+@@ -856,7 +856,7 @@ static int ceph_register_bdi(struct super_block *sb,
+ default_backing_dev_info.ra_pages;
+
+ err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
+- atomic_long_inc_return(&bdi_seq));
++ atomic_long_inc_return_unchecked(&bdi_seq));
+ if (!err)
+ sb->s_bdi = &fsc->backing_dev_info;
+ return err;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index d597483..747901b 100644
--- a/fs/cifs/cifs_debug.c
@@ -52823,7 +52733,7 @@ index e4141f2..d8263e8 100644
i += packet_length_size;
if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
diff --git a/fs/exec.c b/fs/exec.c
-index ffd7a81..3c84660 100644
+index 1f44670..3c84660 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -55,8 +55,20 @@
@@ -53030,24 +52940,6 @@ index ffd7a81..3c84660 100644
/*
* cover the whole range: [new_start, old_end)
*/
-@@ -607,7 +653,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
- return -ENOMEM;
-
- lru_add_drain();
-- tlb_gather_mmu(&tlb, mm, 0);
-+ tlb_gather_mmu(&tlb, mm, old_start, old_end);
- if (new_end > old_start) {
- /*
- * when the old and new regions overlap clear from new_end.
-@@ -624,7 +670,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
- free_pgd_range(&tlb, old_start, old_end, new_end,
- vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
- }
-- tlb_finish_mmu(&tlb, new_end, old_end);
-+ tlb_finish_mmu(&tlb, old_start, old_end);
-
- /*
- * Shrink the vma to just the new range. Always succeeds.
@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
stack_top = arch_align_stack(stack_top);
stack_top = PAGE_ALIGN(stack_top);
@@ -58453,7 +58345,7 @@ index 6b6a993..807cccc 100644
kfree(s);
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
-index 3e636d8..350cc48 100644
+index 65fc60a..350cc48 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -11,12 +11,19 @@
@@ -58620,34 +58512,6 @@ index 3e636d8..350cc48 100644
mss.resident >> 10,
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
mss.shared_clean >> 10,
-@@ -792,14 +843,14 @@ typedef struct {
- } pagemap_entry_t;
-
- struct pagemapread {
-- int pos, len;
-+ int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
- pagemap_entry_t *buffer;
- };
-
- #define PAGEMAP_WALK_SIZE (PMD_SIZE)
- #define PAGEMAP_WALK_MASK (PMD_MASK)
-
--#define PM_ENTRY_BYTES sizeof(u64)
-+#define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
- #define PM_STATUS_BITS 3
- #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
- #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
-@@ -1038,8 +1089,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
- if (!count)
- goto out_task;
-
-- pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
-- pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
-+ pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
-+ pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
- ret = -ENOMEM;
- if (!pm.buffer)
- goto out_task;
@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
int n;
char buffer[50];
@@ -70560,19 +70424,6 @@ index a59ff51..2594a70 100644
#endif /* CONFIG_MMU */
#endif /* !__ASSEMBLY__ */
-diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
-index 13821c3..5672d7e 100644
---- a/include/asm-generic/tlb.h
-+++ b/include/asm-generic/tlb.h
-@@ -112,7 +112,7 @@ struct mmu_gather {
-
- #define HAVE_GENERIC_MMU_GATHER
-
--void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
-+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
- void tlb_flush_mmu(struct mmu_gather *tlb);
- void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
- unsigned long end);
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index c184aa8..d049942 100644
--- a/include/asm-generic/uaccess.h
@@ -74638,7 +74489,7 @@ index 6dacb93..6174423 100644
static inline void anon_vma_merge(struct vm_area_struct *vma,
struct vm_area_struct *next)
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 178a8d9..450bf11 100644
+index 3aeb14b..73816a6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -62,6 +62,7 @@ struct bio_list;
@@ -74658,10 +74509,11 @@ index 178a8d9..450bf11 100644
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
-@@ -314,6 +315,19 @@ struct nsproxy;
+@@ -314,7 +315,19 @@ struct nsproxy;
struct user_namespace;
#ifdef CONFIG_MMU
+-extern unsigned long mmap_legacy_base(void);
+
+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
@@ -74674,11 +74526,11 @@ index 178a8d9..450bf11 100644
+
+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
-+
++extern unsigned long mmap_legacy_base(struct mm_struct *mm);
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
-@@ -591,6 +605,17 @@ struct signal_struct {
+@@ -592,6 +605,17 @@ struct signal_struct {
#ifdef CONFIG_TASKSTATS
struct taskstats *stats;
#endif
@@ -74696,7 +74548,7 @@ index 178a8d9..450bf11 100644
#ifdef CONFIG_AUDIT
unsigned audit_tty;
unsigned audit_tty_log_passwd;
-@@ -671,6 +696,14 @@ struct user_struct {
+@@ -672,6 +696,14 @@ struct user_struct {
struct key *session_keyring; /* UID's default session keyring */
#endif
@@ -74711,7 +74563,7 @@ index 178a8d9..450bf11 100644
/* Hash table maintenance information */
struct hlist_node uidhash_node;
kuid_t uid;
-@@ -1158,8 +1191,8 @@ struct task_struct {
+@@ -1159,8 +1191,8 @@ struct task_struct {
struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
@@ -74722,7 +74574,7 @@ index 178a8d9..450bf11 100644
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
-@@ -1184,11 +1217,6 @@ struct task_struct {
+@@ -1185,11 +1217,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -74734,7 +74586,7 @@ index 178a8d9..450bf11 100644
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
-@@ -1205,6 +1233,10 @@ struct task_struct {
+@@ -1206,6 +1233,10 @@ struct task_struct {
#endif
/* CPU-specific state of this task */
struct thread_struct thread;
@@ -74745,7 +74597,7 @@ index 178a8d9..450bf11 100644
/* filesystem information */
struct fs_struct *fs;
/* open file information */
-@@ -1278,6 +1310,10 @@ struct task_struct {
+@@ -1279,6 +1310,10 @@ struct task_struct {
gfp_t lockdep_reclaim_gfp;
#endif
@@ -74756,7 +74608,7 @@ index 178a8d9..450bf11 100644
/* journalling filesystem info */
void *journal_info;
-@@ -1316,6 +1352,10 @@ struct task_struct {
+@@ -1317,6 +1352,10 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
@@ -74767,7 +74619,7 @@ index 178a8d9..450bf11 100644
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
-@@ -1416,8 +1456,76 @@ struct task_struct {
+@@ -1417,8 +1456,76 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -74844,7 +74696,7 @@ index 178a8d9..450bf11 100644
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-@@ -1476,7 +1584,7 @@ struct pid_namespace;
+@@ -1477,7 +1584,7 @@ struct pid_namespace;
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
struct pid_namespace *ns);
@@ -74853,7 +74705,7 @@ index 178a8d9..450bf11 100644
{
return tsk->pid;
}
-@@ -1919,7 +2027,9 @@ void yield(void);
+@@ -1920,7 +2027,9 @@ void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
@@ -74863,7 +74715,7 @@ index 178a8d9..450bf11 100644
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
-@@ -1952,6 +2062,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -1953,6 +2062,7 @@ extern struct pid_namespace init_pid_ns;
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -74871,7 +74723,7 @@ index 178a8d9..450bf11 100644
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
-@@ -2118,7 +2229,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2119,7 +2229,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -74880,7 +74732,7 @@ index 178a8d9..450bf11 100644
extern int allow_signal(int);
extern int disallow_signal(int);
-@@ -2309,9 +2420,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2310,9 +2420,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#endif
@@ -75446,7 +75298,7 @@ index a5ffd32..0935dea 100644
extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
-index 4147d70..d356a10 100644
+index 84662ec..d8f8adb 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -97,8 +97,12 @@ struct sigaltstack;
@@ -78730,7 +78582,7 @@ index 7bb73f9..d7978ed 100644
{
struct signal_struct *sig = current->signal;
diff --git a/kernel/fork.c b/kernel/fork.c
-index 987b28a..11ee8a5 100644
+index ffbc090..08ceeee 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
@@ -79025,7 +78877,7 @@ index 987b28a..11ee8a5 100644
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
-@@ -1723,7 +1785,7 @@ void __init proc_caches_init(void)
+@@ -1729,7 +1791,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -79034,7 +78886,7 @@ index 987b28a..11ee8a5 100644
mmap_init();
nsproxy_cache_init();
}
-@@ -1763,7 +1825,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1769,7 +1831,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
@@ -79043,7 +78895,7 @@ index 987b28a..11ee8a5 100644
return 0;
*new_fsp = copy_fs_struct(fs);
-@@ -1875,7 +1937,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1881,7 +1943,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
@@ -80739,7 +80591,7 @@ index 42670e9..8719c2f 100644
.clock_get = thread_cpu_clock_get,
.timer_create = thread_cpu_timer_create,
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
-index 424c2d4..a9194f7 100644
+index 424c2d4..679242f 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -43,6 +43,7 @@
@@ -80831,6 +80683,15 @@ index 424c2d4..a9194f7 100644
}
static int common_timer_create(struct k_itimer *new_timer)
+@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+ struct k_clock *kc = clockid_to_kclock(which_clock);
+ struct k_itimer *new_timer;
+ int error, new_timer_id;
+- sigevent_t event;
++ sigevent_t event = { };
+ int it_id_set = IT_ID_NOT_SET;
+
+ if (!kc)
@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
if (copy_from_user(&new_tp, tp, sizeof (*tp)))
return -EFAULT;
@@ -82044,7 +81905,7 @@ index e8b3350..d83d44e 100644
.priority = CPU_PRI_MIGRATION,
};
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index c61a614..d7f3d7e 100644
+index 03b73be..9422b9f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -831,7 +831,7 @@ void task_numa_fault(int node, int pages, bool migrated)
@@ -82056,7 +81917,7 @@ index c61a614..d7f3d7e 100644
p->mm->numa_scan_offset = 0;
}
-@@ -5686,7 +5686,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
+@@ -5687,7 +5687,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
*/
@@ -84318,9 +84179,18 @@ index e742d06..c56fdd8 100644
config NOMMU_INITIAL_TRIM_EXCESS
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
-index 5025174..9fc1c5c 100644
+index 5025174..9d67dcd 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
+@@ -12,7 +12,7 @@
+ #include <linux/device.h>
+ #include <trace/events/writeback.h>
+
+-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
++static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
+
+ struct backing_dev_info default_backing_dev_info = {
+ .name = "default",
@@ -515,7 +515,6 @@ EXPORT_SYMBOL(bdi_destroy);
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
unsigned int cap)
@@ -84335,7 +84205,7 @@ index 5025174..9fc1c5c 100644
- sprintf(tmp, "%.28s%s", name, "-%d");
- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
-+ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return(&bdi_seq));
++ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return_unchecked(&bdi_seq));
if (err) {
bdi_destroy(bdi);
return err;
@@ -84405,7 +84275,7 @@ index b32b70c..e512eb0 100644
set_page_address(page, (void *)vaddr);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 5cf99bf..5c01c2f 100644
+index 7c5eb85..5c01c2f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2022,15 +2022,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
@@ -84450,15 +84320,6 @@ index 5cf99bf..5c01c2f 100644
if (ret)
goto out;
-@@ -2490,7 +2494,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-
- mm = vma->vm_mm;
-
-- tlb_gather_mmu(&tlb, mm, 0);
-+ tlb_gather_mmu(&tlb, mm, start, end);
- __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
- tlb_finish_mmu(&tlb, start, end);
- }
@@ -2545,6 +2549,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
return 1;
}
@@ -84811,39 +84672,10 @@ index ceb0c7f..b2b8e94 100644
} else {
pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
diff --git a/mm/memory.c b/mm/memory.c
-index 5e50800..7c0340f 100644
+index 5a35443..7c0340f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -211,14 +211,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
- * tear-down from @mm. The @fullmm argument is used when @mm is without
- * users and we're going to destroy the full address space (exit/execve).
- */
--void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
-+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- tlb->mm = mm;
-
-- tlb->fullmm = fullmm;
-+ /* Is it from 0 to ~0? */
-+ tlb->fullmm = !(start | (end+1));
- tlb->need_flush_all = 0;
-- tlb->start = -1UL;
-- tlb->end = 0;
-+ tlb->start = start;
-+ tlb->end = end;
- tlb->need_flush = 0;
- tlb->local.next = NULL;
- tlb->local.nr = 0;
-@@ -258,8 +259,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
- {
- struct mmu_gather_batch *batch, *next;
-
-- tlb->start = start;
-- tlb->end = end;
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
-@@ -429,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+@@ -428,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
free_pte_range(tlb, pmd, addr);
} while (pmd++, addr = next, addr != end);
@@ -84851,7 +84683,7 @@ index 5e50800..7c0340f 100644
start &= PUD_MASK;
if (start < floor)
return;
-@@ -443,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+@@ -442,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
@@ -84860,7 +84692,7 @@ index 5e50800..7c0340f 100644
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-@@ -462,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -461,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
free_pmd_range(tlb, pud, addr, next, floor, ceiling);
} while (pud++, addr = next, addr != end);
@@ -84868,7 +84700,7 @@ index 5e50800..7c0340f 100644
start &= PGDIR_MASK;
if (start < floor)
return;
-@@ -476,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -475,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud, start);
@@ -84877,65 +84709,7 @@ index 5e50800..7c0340f 100644
}
/*
-@@ -1101,7 +1106,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
- spinlock_t *ptl;
- pte_t *start_pte;
- pte_t *pte;
-- unsigned long range_start = addr;
-
- again:
- init_rss_vec(rss);
-@@ -1204,17 +1208,25 @@ again:
- * and page-free while holding it.
- */
- if (force_flush) {
-+ unsigned long old_end;
-+
- force_flush = 0;
-
--#ifdef HAVE_GENERIC_MMU_GATHER
-- tlb->start = range_start;
-+ /*
-+ * Flush the TLB just for the previous segment,
-+ * then update the range to be the remaining
-+ * TLB range.
-+ */
-+ old_end = tlb->end;
- tlb->end = addr;
--#endif
-+
- tlb_flush_mmu(tlb);
-- if (addr != end) {
-- range_start = addr;
-+
-+ tlb->start = addr;
-+ tlb->end = old_end;
-+
-+ if (addr != end)
- goto again;
-- }
- }
-
- return addr;
-@@ -1399,7 +1411,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end = start + size;
-
- lru_add_drain();
-- tlb_gather_mmu(&tlb, mm, 0);
-+ tlb_gather_mmu(&tlb, mm, start, end);
- update_hiwater_rss(mm);
- mmu_notifier_invalidate_range_start(mm, start, end);
- for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
-@@ -1425,7 +1437,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
- unsigned long end = address + size;
-
- lru_add_drain();
-- tlb_gather_mmu(&tlb, mm, 0);
-+ tlb_gather_mmu(&tlb, mm, address, end);
- update_hiwater_rss(mm);
- mmu_notifier_invalidate_range_start(mm, address, end);
- unmap_single_vma(&tlb, vma, address, end, details);
-@@ -1638,12 +1650,6 @@ no_page_table:
+@@ -1644,12 +1650,6 @@ no_page_table:
return page;
}
@@ -84948,7 +84722,7 @@ index 5e50800..7c0340f 100644
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
-@@ -1730,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1736,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
i = 0;
@@ -84961,7 +84735,7 @@ index 5e50800..7c0340f 100644
if (!vma && in_gate_area(mm, start)) {
unsigned long pg = start & PAGE_MASK;
pgd_t *pgd;
-@@ -1782,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1788,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto next_page;
}
@@ -84970,7 +84744,7 @@ index 5e50800..7c0340f 100644
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-@@ -1811,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1817,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ret;
unsigned int fault_flags = 0;
@@ -84982,7 +84756,7 @@ index 5e50800..7c0340f 100644
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
-@@ -1895,7 +1896,7 @@ next_page:
+@@ -1901,7 +1896,7 @@ next_page:
start += page_increm * PAGE_SIZE;
nr_pages -= page_increm;
} while (nr_pages && start < vma->vm_end);
@@ -84991,7 +84765,7 @@ index 5e50800..7c0340f 100644
return i;
}
EXPORT_SYMBOL(__get_user_pages);
-@@ -2102,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2108,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -85002,7 +84776,7 @@ index 5e50800..7c0340f 100644
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
-@@ -2146,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2152,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -85024,7 +84798,7 @@ index 5e50800..7c0340f 100644
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
-@@ -2231,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -2237,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -85032,7 +84806,7 @@ index 5e50800..7c0340f 100644
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
-@@ -2478,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -2484,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
@@ -85043,7 +84817,7 @@ index 5e50800..7c0340f 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -2498,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -2504,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
@@ -85054,7 +84828,7 @@ index 5e50800..7c0340f 100644
if (!pud)
return -ENOMEM;
do {
-@@ -2586,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+@@ -2592,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
copy_user_highpage(dst, src, va, vma);
}
@@ -85241,7 +85015,7 @@ index 5e50800..7c0340f 100644
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2802,6 +3004,12 @@ gotten:
+@@ -2808,6 +3004,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -85254,7 +85028,7 @@ index 5e50800..7c0340f 100644
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2853,6 +3061,10 @@ gotten:
+@@ -2859,6 +3061,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -85265,7 +85039,7 @@ index 5e50800..7c0340f 100644
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -3128,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3134,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -85277,7 +85051,7 @@ index 5e50800..7c0340f 100644
unlock_page(page);
if (page != swapcache) {
/*
-@@ -3151,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3157,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -85289,7 +85063,7 @@ index 5e50800..7c0340f 100644
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -3170,40 +3392,6 @@ out_release:
+@@ -3176,40 +3392,6 @@ out_release:
}
/*
@@ -85330,7 +85104,7 @@ index 5e50800..7c0340f 100644
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3212,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3218,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -85363,7 +85137,7 @@ index 5e50800..7c0340f 100644
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3256,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3262,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
@@ -85375,7 +85149,7 @@ index 5e50800..7c0340f 100644
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -3263,6 +3452,12 @@ setpte:
+@@ -3269,6 +3452,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -85388,7 +85162,7 @@ index 5e50800..7c0340f 100644
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -3406,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3412,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*/
/* Only go through if we didn't race with anybody else... */
if (likely(pte_same(*page_table, orig_pte))) {
@@ -85401,7 +85175,7 @@ index 5e50800..7c0340f 100644
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
-@@ -3425,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3431,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, page_table);
@@ -85416,7 +85190,7 @@ index 5e50800..7c0340f 100644
} else {
if (cow_page)
mem_cgroup_uncharge_page(cow_page);
-@@ -3746,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
+@@ -3752,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
@@ -85429,7 +85203,7 @@ index 5e50800..7c0340f 100644
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3762,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3768,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;
@@ -85440,7 +85214,7 @@ index 5e50800..7c0340f 100644
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
-@@ -3773,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3779,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
@@ -85475,7 +85249,7 @@ index 5e50800..7c0340f 100644
retry:
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
-@@ -3871,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3877,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -85499,7 +85273,7 @@ index 5e50800..7c0340f 100644
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3901,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3907,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -85530,7 +85304,7 @@ index 5e50800..7c0340f 100644
#endif /* __PAGETABLE_PMD_FOLDED */
#if !defined(__HAVE_ARCH_GATE_AREA)
-@@ -3914,7 +4202,7 @@ static int __init gate_vma_init(void)
+@@ -3920,7 +4202,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -85539,7 +85313,7 @@ index 5e50800..7c0340f 100644
return 0;
}
-@@ -4048,8 +4336,8 @@ out:
+@@ -4054,8 +4336,8 @@ out:
return ret;
}
@@ -85550,7 +85324,7 @@ index 5e50800..7c0340f 100644
{
resource_size_t phys_addr;
unsigned long prot = 0;
-@@ -4074,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+@@ -4080,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
@@ -85561,7 +85335,7 @@ index 5e50800..7c0340f 100644
{
struct vm_area_struct *vma;
void *old_buf = buf;
-@@ -4083,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4089,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
@@ -85570,7 +85344,7 @@ index 5e50800..7c0340f 100644
void *maddr;
struct page *page = NULL;
-@@ -4142,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4148,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*
* The caller must hold a reference on @mm.
*/
@@ -85581,7 +85355,7 @@ index 5e50800..7c0340f 100644
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
-@@ -4153,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -4159,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
@@ -85758,7 +85532,7 @@ index 79b7cf7..9944291 100644
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff --git a/mm/mmap.c b/mm/mmap.c
-index 7dbe397..bfb7626 100644
+index 8d25fdc..bfb7626 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -36,6 +36,7 @@
@@ -86627,15 +86401,6 @@ index 7dbe397..bfb7626 100644
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
-@@ -2356,7 +2728,7 @@ static void unmap_region(struct mm_struct *mm,
- struct mmu_gather tlb;
-
- lru_add_drain();
-- tlb_gather_mmu(&tlb, mm, 0);
-+ tlb_gather_mmu(&tlb, mm, start, end);
- update_hiwater_rss(mm);
- unmap_vmas(&tlb, vma, start, end);
- free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2379,6 +2751,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
@@ -86941,15 +86706,6 @@ index 7dbe397..bfb7626 100644
return addr;
}
-@@ -2735,7 +3232,7 @@ void exit_mmap(struct mm_struct *mm)
-
- lru_add_drain();
- flush_cache_mm(mm);
-- tlb_gather_mmu(&tlb, mm, 1);
-+ tlb_gather_mmu(&tlb, mm, 0, -1);
- /* update_hiwater_rss(mm) here? but nobody should be looking */
- /* Use -1 here to ensure all VMAs in the mm are unmapped */
- unmap_vmas(&tlb, vma, 0, -1);
@@ -2750,6 +3247,7 @@ void exit_mmap(struct mm_struct *mm)
while (vma) {
if (vma->vm_flags & VM_ACCOUNT)
@@ -93608,7 +93364,7 @@ index 57ee84d..8b99cf5 100644
);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
-index 1076fe1..8285fd7 100644
+index 1076fe1..f190285 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -310,18 +310,20 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
@@ -93649,27 +93405,6 @@ index 1076fe1..8285fd7 100644
return 0;
}
}
-@@ -789,6 +791,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
- struct net *net = sock_net(skb->sk);
- int chains_to_skip = cb->args[0];
- int fams_to_skip = cb->args[1];
-+ bool need_locking = chains_to_skip || fams_to_skip;
-+
-+ if (need_locking)
-+ genl_lock();
-
- for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
- n = 0;
-@@ -810,6 +816,9 @@ errout:
- cb->args[0] = i;
- cb->args[1] = n;
-
-+ if (need_locking)
-+ genl_unlock();
-+
- return skb->len;
- }
-
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index ec0c80f..41e1830 100644
--- a/net/netrom/af_netrom.c