author     Leonardo Arena <rnalrd@alpinelinux.org>  2013-09-03 09:54:02 +0000
committer  Leonardo Arena <rnalrd@alpinelinux.org>  2013-09-03 09:54:19 +0000
commit     e28146e4e86d47b4c3143e2d7d47b121e0fd9f0f (patch)
tree       bc04dd3f66a302251819f3f986d1f7b2d266a4d8 /main/linux-virt-grsec
parent     b3f984eed354d39c4c436864c84595ec49665ad4 (diff)
download   aports-e28146e4e86d47b4c3143e2d7d47b121e0fd9f0f.tar.bz2
           aports-e28146e4e86d47b4c3143e2d7d47b121e0fd9f0f.tar.xz
main/linux-virt-grsec: upgrade to 3.10.10
Diffstat (limited to 'main/linux-virt-grsec')
-rw-r--r--  main/linux-virt-grsec/APKBUILD                                                                                                                   |   28
-rw-r--r--  main/linux-virt-grsec/grsecurity-2.9.1-3.10.10-201309011630.patch (renamed from main/linux-virt-grsec/grsecurity-2.9.1-3.10.7-201308171249.patch) | 4678
-rw-r--r--  main/linux-virt-grsec/kernelconfig.x86                                                                                                           |    7
-rw-r--r--  main/linux-virt-grsec/kernelconfig.x86_64                                                                                                        |    7
4 files changed, 3879 insertions, 841 deletions
diff --git a/main/linux-virt-grsec/APKBUILD b/main/linux-virt-grsec/APKBUILD
index ec49fdc49a..387eeb31b1 100644
--- a/main/linux-virt-grsec/APKBUILD
+++ b/main/linux-virt-grsec/APKBUILD
@@ -3,7 +3,7 @@
_flavor=grsec
pkgname=linux-virt-${_flavor}
-pkgver=3.10.7
+pkgver=3.10.10
case $pkgver in
*.*.*) _kernver=${pkgver%.*};;
*.*) _kernver=${pkgver};;
@@ -18,7 +18,7 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
- grsecurity-2.9.1-3.10.7-201308171249.patch
+ grsecurity-2.9.1-3.10.10-201309011630.patch
0001-net-inform-NETDEV_CHANGE-callbacks-which-flags-were-.patch
0002-arp-flush-arp-cache-on-IFF_NOARP-change.patch
0003-ipv4-properly-refresh-rtable-entries-on-pmtu-redirec.patch
@@ -148,35 +148,35 @@ dev() {
}
md5sums="4f25cd5bec5f8d5a7d935b3f2ccb8481 linux-3.10.tar.xz
-6b1b6b62044fcf3624f067154d5c1666 patch-3.10.7.xz
-e8a352c746da4aaf2e14a89da6896023 grsecurity-2.9.1-3.10.7-201308171249.patch
+d010ef17d3e577fd1bdcb6887f2b9836 patch-3.10.10.xz
+93e8f4484f44dd0251ff5bb90bfa6505 grsecurity-2.9.1-3.10.10-201309011630.patch
a16f11b12381efb3bec79b9bfb329836 0001-net-inform-NETDEV_CHANGE-callbacks-which-flags-were-.patch
656ae7b10dd2f18dbfa1011041d08d60 0002-arp-flush-arp-cache-on-IFF_NOARP-change.patch
aa454ffb96428586447775c21449e284 0003-ipv4-properly-refresh-rtable-entries-on-pmtu-redirec.patch
2a12a3717052e878c0cd42aa935bfcf4 0004-ipv4-rate-limit-updating-of-next-hop-exceptions-with.patch
6ce5fed63aad3f1a1ff1b9ba7b741822 0005-ipv4-use-separate-genid-for-next-hop-exceptions.patch
1a5800a2122ba0cc0d06733cb3bb8b8f 0006-ipv4-use-next-hop-exceptions-also-for-input-routes.patch
-246de0aecacde70ce26d9c4a4006aedb kernelconfig.x86
-307fc07ff32a2bc22f34eb2b1d0b886f kernelconfig.x86_64"
+539c848d541c1656851fe865018273df kernelconfig.x86
+82cd965fc82651f2e6b35e75c17d8031 kernelconfig.x86_64"
sha256sums="df27fa92d27a9c410bfe6c4a89f141638500d7eadcca5cce578954efc2ad3544 linux-3.10.tar.xz
-a92836d9ae477a7730c79d8ad521a2859ecdd8dea1ac0fa561fb5ce8517f5d1e patch-3.10.7.xz
-9424fb61b373fb3a84cdf0b82183ae4429158a8b582ef49a33af629557330e2a grsecurity-2.9.1-3.10.7-201308171249.patch
+22cb9a7721bacd40d83c2d630f672e09495ce9d29f896e874ea8669bb577e193 patch-3.10.10.xz
+ced13b573f77e5c17449a54fdc6252d3516a8ce2e44579cb4853a134ba2e89fb grsecurity-2.9.1-3.10.10-201309011630.patch
6af3757ac36a6cd3cda7b0a71b08143726383b19261294a569ad7f4042c72df3 0001-net-inform-NETDEV_CHANGE-callbacks-which-flags-were-.patch
dc8e82108615657f1fb9d641efd42255a5761c06edde1b00a41ae0d314d548f0 0002-arp-flush-arp-cache-on-IFF_NOARP-change.patch
0985caa0f3ee8ed0959aeaa4214f5f8057ae8e61d50dcae39194912d31e14892 0003-ipv4-properly-refresh-rtable-entries-on-pmtu-redirec.patch
260fd1807838b68305a96992bf7d3302a2a8ef3a3b08fe079ba9a07e6422f736 0004-ipv4-rate-limit-updating-of-next-hop-exceptions-with.patch
ae32bb72afa170e6c3788c564b342763aba5945afacc1e2ebfc096adf50d77a3 0005-ipv4-use-separate-genid-for-next-hop-exceptions.patch
fc613ac466610b866b721c41836fd5bfb2d4b75bceb67972dc6369d7f62ff47e 0006-ipv4-use-next-hop-exceptions-also-for-input-routes.patch
-324ad615f077368699edc840e34470557be880d2c812a7048cf993c60cec0fa6 kernelconfig.x86
-20a4b46aad191452b7269288dbd205ee05c7d7681e2c129f6381aa2f9a7e8200 kernelconfig.x86_64"
+997d4c8a5a2b911047d26fe1bf8ee8d5cd3b7133e6abdc07b7deacd0b3eb2330 kernelconfig.x86
+7845194551137fbc3b69a75249696bc843bb7fe7f4a4e6b0582c0ca0856caa64 kernelconfig.x86_64"
sha512sums="5fb109fcbd59bf3dffc911b853894f0a84afa75151368f783a1252c5ff60c7a1504de216c0012be446df983e2dea400ad8eeed3ce04f24dc61d0ef76c174dc35 linux-3.10.tar.xz
-d34729cfca045f12077c44518171a5b933790b112f2576aa55ba7f6684567b04a6beea4da8a635dcc078a844f9cd47aa66ead1fd6d68b926fdc09ecb0ae34324 patch-3.10.7.xz
-1ddc7f9f28e5a8451a36b6cf800e173a59cbd2271aca772b24c568b77fa37997d0bd095e032ffb94d897a5e4d9ebc102e8eb69acb04a57f1938cd92fe98e306e grsecurity-2.9.1-3.10.7-201308171249.patch
+3e87e48d009f05bbaafad55b1f601dc84e6f095b14ec1ad3fe68b37d6722bf47f2482639a7e21b00e8a13f141f3f0e78bdb79e049661eef2aea1c9b93579734b patch-3.10.10.xz
+6ab1b72480b91d1a8916769191051fd76a19231ad253d81aa1ed866cbb06512eb7fbee53a0d9fb0b584c0de663f1156958ca4e1194e1446ffa860c129b00ff8b grsecurity-2.9.1-3.10.10-201309011630.patch
81e78593288e8b0fd2c03ea9fc1450323887707f087e911f172450a122bc9b591ee83394836789730d951aeec13d0b75a64e1c05f04364abf8f80d883ddc4a02 0001-net-inform-NETDEV_CHANGE-callbacks-which-flags-were-.patch
51ecb15b669f6a82940a13a38939116e003bf5dfd24496771c8279e907b72adcc63d607f0340a2940d757e12ddadb7d45c7af78ae311d284935a6296dbcac00c 0002-arp-flush-arp-cache-on-IFF_NOARP-change.patch
57d0a8bd35d19cf657ded58efe24517d2252aec6984040713ba173a34edb5887ececaa2985076bc6a149eaa57639fd98a042c1c2d226ed4ad8dd5ed0e230717e 0003-ipv4-properly-refresh-rtable-entries-on-pmtu-redirec.patch
d2f578ad1d6e1fe52b55863e5bf338ae8201b828a498ec3e42e549c55295d3d1c6c3adfa9e226d711e3486628ed56ab996484e219d79ac4b0c0ec684ebd380aa 0004-ipv4-rate-limit-updating-of-next-hop-exceptions-with.patch
28a33e644bf2faf99c8dd6dbccfe14e140dfdd8824a8fb2d58aa7deb9e572f130d92b6b35ee181084050d82166bdf2e498a451a2a538a67b7ab84204405d2d87 0005-ipv4-use-separate-genid-for-next-hop-exceptions.patch
249140374c19a5599876268ff5b3cda2e136681aee103b4a9fff5d7d346f8e3295a907fb43db0701b8a9fece64c299ad2abac0434259cce6631307ce84090205 0006-ipv4-use-next-hop-exceptions-also-for-input-routes.patch
-3f1965a6c5fc9dc2cd3da407edab473caa964ef7cddba711f6c98b1710d2e50c7bba4ccb21ada794a387f100903ab16feebfd4910a6033d889878100a6bb4e77 kernelconfig.x86
-23fc5c7807d9b4804a2e4cd65c597bec1ca7117e35c1e9b001c0c6d6ff9d736ef166515c1a0a9545a66d78b75e411e192c4cff96f9cbfee1cdd2260d46b6bec0 kernelconfig.x86_64"
+08f5bb86f03415f60e9bb171ffa192fa70fa85dafeada10817a4b8ae7932fffd1b0f2df01750407e576bb71cd6c3f2c5fe76f2613922cd77f398bce134fd3ec2 kernelconfig.x86
+7a373bec279fe00601bff7ed64c5a09bb4318bf07557fece194afb426d78bf13191b0510642192518f3ffa68b9a36aa6f971f77f55ea09ae4f42c4a84afc00a5 kernelconfig.x86_64"
diff --git a/main/linux-virt-grsec/grsecurity-2.9.1-3.10.7-201308171249.patch b/main/linux-virt-grsec/grsecurity-2.9.1-3.10.10-201309011630.patch
index 9a72c3e12c..54e508953f 100644
--- a/main/linux-virt-grsec/grsecurity-2.9.1-3.10.7-201308171249.patch
+++ b/main/linux-virt-grsec/grsecurity-2.9.1-3.10.10-201309011630.patch
@@ -281,7 +281,7 @@ index 2fe6e76..889ee23 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 33e36ab..31f1dc8 100644
+index b119684..13ac256 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -1968,7 +1968,7 @@ index 86b8fe3..e25f975 100644
#define L_PTE_DIRTY_HIGH (1 << (55 - 32))
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
-index 9bcd262..fba731c 100644
+index 9bcd262..1ff999b 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -30,6 +30,9 @@
@@ -1991,20 +1991,18 @@ index 9bcd262..fba731c 100644
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);
-@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+@@ -53,6 +59,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
+#define __HAVE_ARCH_PAX_OPEN_KERNEL
+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
+
-+#ifdef CONFIG_PAX_KERNEXEC
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+#include <asm/domain.h>
+#include <linux/thread_info.h>
+#include <linux/preempt.h>
-+#endif
+
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+static inline int test_domain(int domain, int domaintype)
+{
+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
@@ -2042,7 +2040,7 @@ index 9bcd262..fba731c 100644
/*
* This is the lowest virtual address we can permit any user space
* mapping to be mapped at. This is particularly important for
-@@ -72,8 +122,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+@@ -72,8 +120,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
/*
* The pgprot_* and protection_map entries will be fixed up in runtime
* to include the cachable and bufferable bits based on memory policy,
@@ -2053,7 +2051,7 @@ index 9bcd262..fba731c 100644
*/
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
-@@ -257,7 +307,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+@@ -257,7 +305,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
@@ -2144,33 +2142,6 @@ index f00b569..aa5bb41 100644
/*
* Change these and you break ASM code in entry-common.S
-diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
-index bdf2b84..aa9b4ac 100644
---- a/arch/arm/include/asm/tlb.h
-+++ b/arch/arm/include/asm/tlb.h
-@@ -43,6 +43,7 @@ struct mmu_gather {
- struct mm_struct *mm;
- unsigned int fullmm;
- struct vm_area_struct *vma;
-+ unsigned long start, end;
- unsigned long range_start;
- unsigned long range_end;
- unsigned int nr;
-@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
- }
-
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- tlb->mm = mm;
-- tlb->fullmm = fullmm;
-+ tlb->fullmm = !(start | (end+1));
-+ tlb->start = start;
-+ tlb->end = end;
- tlb->vma = NULL;
- tlb->max = ARRAY_SIZE(tlb->local);
- tlb->pages = tlb->local;
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7e1f760..de33b13 100644
--- a/arch/arm/include/asm/uaccess.h
@@ -2889,33 +2860,18 @@ index 07314af..c46655c 100644
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
-index d9f5cd4..e186ee1 100644
+index e19edc6..e186ee1 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
-@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
- static int
- armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
- {
-- int mapping = (*event_map)[config];
-+ int mapping;
-+
-+ if (config >= PERF_COUNT_HW_MAX)
-+ return -EINVAL;
-+
-+ mapping = (*event_map)[config];
- return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
- }
-
-@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
- struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct pmu *leader_pmu = event->group_leader->pmu;
+@@ -56,7 +56,7 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+ int mapping;
-+ if (is_software_event(event))
-+ return 1;
-+
- if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
- return 1;
+ if (config >= PERF_COUNT_HW_MAX)
+- return -ENOENT;
++ return -EINVAL;
+ mapping = (*event_map)[config];
+ return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 1f2740e..b36e225 100644
--- a/arch/arm/kernel/perf_event_cpu.c
@@ -2930,21 +2886,9 @@ index 1f2740e..b36e225 100644
};
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 5bc2615..4f1a0c2 100644
+index 5bc2615..dcd439f 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
-@@ -28,10 +28,10 @@
- #include <linux/tick.h>
- #include <linux/utsname.h>
- #include <linux/uaccess.h>
--#include <linux/random.h>
- #include <linux/hw_breakpoint.h>
- #include <linux/cpuidle.h>
- #include <linux/leds.h>
-+#include <linux/random.h>
-
- #include <asm/cacheflush.h>
- #include <asm/idmap.h>
@@ -223,6 +223,7 @@ void machine_power_off(void)
if (pm_power_off)
@@ -3327,6 +3271,46 @@ index 33f2ea3..0b91824 100644
. = ALIGN(THREAD_SIZE);
__data_loc = .;
#endif
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index ef1703b..46b77f3 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -56,7 +56,7 @@ static unsigned long hyp_default_vectors;
+ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
+
+ /* The VMID used in the VTTBR */
+-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
++static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
+ static u8 kvm_next_vmid;
+ static DEFINE_SPINLOCK(kvm_vmid_lock);
+
+@@ -392,7 +392,7 @@ void force_vm_exit(const cpumask_t *mask)
+ */
+ static bool need_new_vmid_gen(struct kvm *kvm)
+ {
+- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
++ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
+ }
+
+ /**
+@@ -425,7 +425,7 @@ static void update_vttbr(struct kvm *kvm)
+
+ /* First user of a new VMID generation? */
+ if (unlikely(kvm_next_vmid == 0)) {
+- atomic64_inc(&kvm_vmid_gen);
++ atomic64_inc_unchecked(&kvm_vmid_gen);
+ kvm_next_vmid = 1;
+
+ /*
+@@ -442,7 +442,7 @@ static void update_vttbr(struct kvm *kvm)
+ kvm_call_hyp(__kvm_flush_vm_context);
+ }
+
+- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
++ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
+ kvm->arch.vmid = kvm_next_vmid;
+ kvm_next_vmid++;
+
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 14a0d98..7771a7d 100644
--- a/arch/arm/lib/clear_user.S
@@ -3688,7 +3672,7 @@ index cad3ca86..1d79e0f 100644
extern void ux500_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
-index 2950082..d0f0782 100644
+index 08c9fe9..191320c 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -436,7 +436,7 @@ config CPU_32v5
@@ -3716,8 +3700,8 @@ index 2950082..d0f0782 100644
help
Warning: disabling this option may break user programs.
-@@ -790,7 +792,7 @@ config KUSER_HELPERS
- run on ARMv4 through to ARMv7 without modification.
+@@ -792,7 +794,7 @@ config KUSER_HELPERS
+ See Documentation/arm/kernel_user_helpers.txt for details.
However, the fixed address nature of these helpers can be used
- by ROP (return orientated programming) authors when creating
@@ -3790,8 +3774,56 @@ index 6f4585b..7b6f52b 100644
if (err) \
goto fault; \
} while (0)
+diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
+index eeab06e..2638dc2 100644
+--- a/arch/arm/mm/context.c
++++ b/arch/arm/mm/context.c
+@@ -42,7 +42,7 @@
+ #define NUM_USER_ASIDS ASID_FIRST_VERSION
+
+ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
++static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+ static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
+
+ static DEFINE_PER_CPU(atomic64_t, active_asids);
+@@ -188,7 +188,7 @@ static int is_reserved_asid(u64 asid)
+ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+ {
+ u64 asid = atomic64_read(&mm->context.id);
+- u64 generation = atomic64_read(&asid_generation);
++ u64 generation = atomic64_read_unchecked(&asid_generation);
+
+ if (asid != 0 && is_reserved_asid(asid)) {
+ /*
+@@ -206,7 +206,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+ */
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+ if (asid == NUM_USER_ASIDS) {
+- generation = atomic64_add_return(ASID_FIRST_VERSION,
++ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
+ &asid_generation);
+ flush_context(cpu);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+@@ -235,14 +235,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
+ cpu_set_reserved_ttbr0();
+
+ asid = atomic64_read(&mm->context.id);
+- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
++ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
+ && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
+ goto switch_mm_fastpath;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ /* Check that our ASID belongs to the current generation. */
+ asid = atomic64_read(&mm->context.id);
+- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
++ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
+ asid = new_context(mm, cpu);
+ atomic64_set(&mm->context.id, asid);
+ }
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
-index 5dbf13f..ee1ec24 100644
+index 5dbf13f..a2d1876 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -25,6 +25,7 @@
@@ -3894,7 +3926,7 @@ index 5dbf13f..ee1ec24 100644
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
-@@ -569,15 +631,67 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
+@@ -569,15 +631,68 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
ifsr_info[nr].name = name;
}
@@ -3906,18 +3938,19 @@ index 5dbf13f..ee1ec24 100644
{
const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
struct siginfo info;
-
++ unsigned long pc = instruction_pointer(regs);
++
+ if (user_mode(regs)) {
+ unsigned long sigpage = current->mm->context.sigpage;
+
-+ if (sigpage <= addr && addr < sigpage + 7*4) {
-+ if (addr < sigpage + 3*4)
++ if (sigpage <= pc && pc < sigpage + 7*4) {
++ if (pc < sigpage + 3*4)
+ sys_sigreturn(regs);
+ else
+ sys_rt_sigreturn(regs);
+ return;
+ }
-+ if (addr == 0xffff0fe0UL) {
++ if (pc == 0xffff0fe0UL) {
+ /*
+ * PaX: __kuser_get_tls emulation
+ */
@@ -3932,11 +3965,11 @@ index 5dbf13f..ee1ec24 100644
+ if (current->signal->curr_ip)
+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
-+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
++ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
+ else
+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
-+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
++ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
+ goto die;
+ }
+#endif
@@ -3945,7 +3978,7 @@ index 5dbf13f..ee1ec24 100644
+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
+ unsigned int bkpt;
+
-+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
++ if (!probe_kernel_address((unsigned int *)pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
+ current->thread.error_code = ifsr;
+ current->thread.trap_no = 0;
+ pax_report_refcount_overflow(regs);
@@ -3954,7 +3987,7 @@ index 5dbf13f..ee1ec24 100644
+ }
+ }
+#endif
-+
+
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
@@ -4543,33 +4576,6 @@ index ce6d763..cfea917 100644
extern void *samsung_dmadev_get_ops(void);
extern void *s3c_dma_get_ops(void);
-diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
-index 654f096..5546653 100644
---- a/arch/arm64/include/asm/tlb.h
-+++ b/arch/arm64/include/asm/tlb.h
-@@ -35,6 +35,7 @@ struct mmu_gather {
- struct mm_struct *mm;
- unsigned int fullmm;
- struct vm_area_struct *vma;
-+ unsigned long start, end;
- unsigned long range_start;
- unsigned long range_end;
- unsigned int nr;
-@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
- }
-
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- tlb->mm = mm;
-- tlb->fullmm = fullmm;
-+ tlb->fullmm = !(start | (end+1));
-+ tlb->start = start;
-+ tlb->end = end;
- tlb->vma = NULL;
- tlb->max = ARRAY_SIZE(tlb->local);
- tlb->pages = tlb->local;
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f4726dc..39ed646 100644
--- a/arch/arm64/kernel/debug-monitors.c
@@ -4979,45 +4985,6 @@ index 54ff557..70c88b7 100644
}
static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
-diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
-index ef3a9de..bc5efc7 100644
---- a/arch/ia64/include/asm/tlb.h
-+++ b/arch/ia64/include/asm/tlb.h
-@@ -22,7 +22,7 @@
- * unmapping a portion of the virtual address space, these hooks are called according to
- * the following template:
- *
-- * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM
-+ * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
- * {
- * for each vma that needs a shootdown do {
- * tlb_start_vma(tlb, vma);
-@@ -58,6 +58,7 @@ struct mmu_gather {
- unsigned int max;
- unsigned char fullmm; /* non-zero means full mm flush */
- unsigned char need_flush; /* really unmapped some PTEs? */
-+ unsigned long start, end;
- unsigned long start_addr;
- unsigned long end_addr;
- struct page **pages;
-@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-
-
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- tlb->mm = mm;
- tlb->max = ARRAY_SIZE(tlb->local);
- tlb->pages = tlb->local;
- tlb->nr = 0;
-- tlb->fullmm = full_mm_flush;
-+ tlb->fullmm = !(start | (end+1));
-+ tlb->start = start;
-+ tlb->end = end;
- tlb->start_addr = ~0UL;
- }
-
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 449c8c0..18965fb 100644
--- a/arch/ia64/include/asm/uaccess.h
@@ -5467,10 +5434,10 @@ index 4efe96a..60e8699 100644
#define SMP_CACHE_BYTES L1_CACHE_BYTES
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
-index 08b6079..eb272cf 100644
+index 08b6079..8b554d2 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
-@@ -21,6 +21,10 @@
+@@ -21,15 +21,39 @@
#include <asm/cmpxchg.h>
#include <asm/war.h>
@@ -5480,24 +5447,899 @@ index 08b6079..eb272cf 100644
+
#define ATOMIC_INIT(i) { (i) }
++#ifdef CONFIG_64BIT
++#define _ASM_EXTABLE(from, to) \
++" .section __ex_table,\"a\"\n" \
++" .dword " #from ", " #to"\n" \
++" .previous\n"
++#else
++#define _ASM_EXTABLE(from, to) \
++" .section __ex_table,\"a\"\n" \
++" .word " #from ", " #to"\n" \
++" .previous\n"
++#endif
++
/*
-@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
*/
- #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
+-#define atomic_read(v) (*(volatile int *)&(v)->counter)
++static inline int atomic_read(const atomic_t *v)
++{
++ return (*(volatile const int *) &v->counter);
++}
++
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return (*(volatile const int *) &v->counter);
++}
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+ /*
+ * atomic_set - set atomic variable
+@@ -38,7 +62,15 @@
+ *
+ * Atomically sets the value of @v to @i.
+ */
+-#define atomic_set(v, i) ((v)->counter = (i))
++static inline void atomic_set(atomic_t *v, int i)
++{
++ v->counter = i;
++}
+
- #endif /* CONFIG_64BIT */
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+
+ /*
+ * atomic_add - add integer to atomic variable
+@@ -47,7 +79,67 @@
+ *
+ * Atomically adds @i to @v.
+ */
+-static __inline__ void atomic_add(int i, atomic_t * v)
++static __inline__ void atomic_add(int i, atomic_t *v)
++{
++ int temp;
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %0, %1 # atomic_add \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: add %0, %2 \n"
++#else
++ " addu %0, %2 \n"
++#endif
++ " sc %0, %1 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %0, %1 # atomic_add \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: add %0, %2 \n"
++#else
++ " addu %0, %2 \n"
++#endif
++ " sc %0, %1 \n"
++ " beqz %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: add %0, %1 \n"
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#else
++ " addu %0, %1 \n"
++#endif
++ : "+r" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++}
++
++static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+ {
+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
+ int temp;
+@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
+ *
+ * Atomically subtracts @i from @v.
+ */
+-static __inline__ void atomic_sub(int i, atomic_t * v)
++static __inline__ void atomic_sub(int i, atomic_t *v)
++{
++ int temp;
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %0, %1 # atomic64_sub \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: sub %0, %2 \n"
++#else
++ " subu %0, %2 \n"
++#endif
++ " sc %0, %1 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %0, %1 # atomic64_sub \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: sub %0, %2 \n"
++#else
++ " subu %0, %2 \n"
++#endif
++ " sc %0, %1 \n"
++ " beqz %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: sub %0, %1 \n"
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#else
++ " subu %0, %1 \n"
++#endif
++ : "+r" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++}
++
++static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
+ {
+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
+ int temp;
+@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
+ /*
+ * Same as above, but return the result value
+ */
+-static __inline__ int atomic_add_return(int i, atomic_t * v)
++static __inline__ int atomic_add_return(int i, atomic_t *v)
++{
++ int result;
++ int temp;
++
++ smp_mb__before_llsc();
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %1, %2 # atomic_add_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: add %0, %1, %3 \n"
++#else
++ " addu %0, %1, %3 \n"
++#endif
++ " sc %0, %2 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " b 4f \n"
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: addu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %1, %2 # atomic_add_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: add %0, %1, %3 \n"
++#else
++ " addu %0, %1, %3 \n"
++#endif
++ " sc %0, %2 \n"
++ " bnez %0, 4f \n"
++ " b 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: addu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++ " lw %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: add %0, %2 \n"
++#else
++ " addu %0, %2 \n"
++#endif
++ " sw %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Note: Dest reg is not modified on overflow */
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#endif
++ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++
++ smp_llsc_mb();
++
++ return result;
++}
++
++static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+ {
+ int result;
+
+@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
+ return result;
+ }
+
+-static __inline__ int atomic_sub_return(int i, atomic_t * v)
++static __inline__ int atomic_sub_return(int i, atomic_t *v)
++{
++ int result;
++ int temp;
++
++ smp_mb__before_llsc();
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %1, %2 # atomic_sub_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: sub %0, %1, %3 \n"
++#else
++ " subu %0, %1, %3 \n"
++#endif
++ " sc %0, %2 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " b 4f \n"
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: subu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
++ : "Ir" (i), "m" (v->counter)
++ : "memory");
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %1, %2 # atomic_sub_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: sub %0, %1, %3 \n"
++#else
++ " subu %0, %1, %3 \n"
++#endif
++ " sc %0, %2 \n"
++ " bnez %0, 4f \n"
++ " b 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: subu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++ " lw %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: sub %0, %2 \n"
++#else
++ " subu %0, %2 \n"
++#endif
++ " sw %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Note: Dest reg is not modified on overflow */
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#endif
++ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++
++ smp_llsc_mb();
++
++ return result;
++}
++static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
+ {
+ int result;
+
+@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
+ */
+-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
++static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
+ {
+ int result;
+
+@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ return result;
+ }
+
+-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
++static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
++ int new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline int atomic_xchg(atomic_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
++
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&(v->counter), new);
++}
+
+ /**
+ * __atomic_add_unless - add unless the number is a given value
+@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+
+ #define atomic_dec_return(v) atomic_sub_return(1, (v))
+ #define atomic_inc_return(v) atomic_add_return(1, (v))
++static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+
+ /*
+ * atomic_sub_and_test - subtract value from variable and test result
+@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v) == 0;
++}
+
+ /*
+ * atomic_dec_and_test - decrement by 1 and test
+@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * Atomically increments @v by 1.
+ */
+ #define atomic_inc(v) atomic_add(1, (v))
++static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+
+ /*
+ * atomic_dec - decrement and test
+@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * Atomically decrements @v by 1.
+ */
+ #define atomic_dec(v) atomic_sub(1, (v))
++static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+
+ /*
+ * atomic_add_negative - add and test if negative
+@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * @v: pointer of type atomic64_t
+ *
+ */
+-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
++static inline long atomic64_read(const atomic64_t *v)
++{
++ return (*(volatile const long *) &v->counter);
++}
++
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return (*(volatile const long *) &v->counter);
++}
/*
+ * atomic64_set - set atomic variable
+ * @v: pointer of type atomic64_t
+ * @i: required value
+ */
+-#define atomic64_set(v, i) ((v)->counter = (i))
++static inline void atomic64_set(atomic64_t *v, long i)
++{
++ v->counter = i;
++}
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
+
+ /*
+ * atomic64_add - add integer to atomic variable
+@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ *
+ * Atomically adds @i to @v.
+ */
+-static __inline__ void atomic64_add(long i, atomic64_t * v)
++static __inline__ void atomic64_add(long i, atomic64_t *v)
++{
++ long temp;
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %0, %1 # atomic64_add \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: dadd %0, %2 \n"
++#else
++ " daddu %0, %2 \n"
++#endif
++ " scd %0, %1 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %0, %1 # atomic64_add \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: dadd %0, %2 \n"
++#else
++ " daddu %0, %2 \n"
++#endif
++ " scd %0, %1 \n"
++ " beqz %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: dadd %0, %1 \n"
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#else
++ " daddu %0, %1 \n"
++#endif
++ : "+r" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++}
++static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
+ {
+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
+ long temp;
+@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
+ *
+ * Atomically subtracts @i from @v.
+ */
+-static __inline__ void atomic64_sub(long i, atomic64_t * v)
++static __inline__ void atomic64_sub(long i, atomic64_t *v)
++{
++ long temp;
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %0, %1 # atomic64_sub \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: dsub %0, %2 \n"
++#else
++ " dsubu %0, %2 \n"
++#endif
++ " scd %0, %1 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %0, %1 # atomic64_sub \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: dsub %0, %2 \n"
++#else
++ " dsubu %0, %2 \n"
++#endif
++ " scd %0, %1 \n"
++ " beqz %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: dsub %0, %1 \n"
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#else
++ " dsubu %0, %1 \n"
++#endif
++ : "+r" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++}
++
++static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
+ {
+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
+ long temp;
+@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
+ /*
+ * Same as above, but return the result value
+ */
+-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
++static __inline__ long atomic64_add_return(long i, atomic64_t *v)
++{
++ long result;
++ long temp;
++
++ smp_mb__before_llsc();
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %1, %2 # atomic64_add_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: dadd %0, %1, %3 \n"
++#else
++ " daddu %0, %1, %3 \n"
++#endif
++ " scd %0, %2 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " b 4f \n"
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: daddu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %1, %2 # atomic64_add_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: dadd %0, %1, %3 \n"
++#else
++ " daddu %0, %1, %3 \n"
++#endif
++ " scd %0, %2 \n"
++ " bnez %0, 4f \n"
++ " b 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: daddu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
++ : "Ir" (i), "m" (v->counter)
++ : "memory");
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++ " ld %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: dadd %0, %2 \n"
++#else
++ " daddu %0, %2 \n"
++#endif
++ " sd %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Note: Dest reg is not modified on overflow */
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#endif
++ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++
++ smp_llsc_mb();
++
++ return result;
++}
++static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
+ {
+ long result;
+
+@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
+ return result;
+ }
+
+-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
++static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
++{
++ long result;
++ long temp;
++
++ smp_mb__before_llsc();
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ long temp;
++
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %1, %2 # atomic64_sub_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: dsub %0, %1, %3 \n"
++#else
++ " dsubu %0, %1, %3 \n"
++#endif
++ " scd %0, %2 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " b 4f \n"
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: dsubu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
++ : "Ir" (i), "m" (v->counter)
++ : "memory");
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %1, %2 # atomic64_sub_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: dsub %0, %1, %3 \n"
++#else
++ " dsubu %0, %1, %3 \n"
++#endif
++ " scd %0, %2 \n"
++ " bnez %0, 4f \n"
++ " b 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: dsubu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
++ : "Ir" (i), "m" (v->counter)
++ : "memory");
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++ " ld %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: dsub %0, %2 \n"
++#else
++ " dsubu %0, %2 \n"
++#endif
++ " sd %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Note: Dest reg is not modified on overflow */
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#endif
++ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++
++ smp_llsc_mb();
++
++ return result;
++}
++
++static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
+ {
+ long result;
+
+@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
+ */
+-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
++static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
+ {
+ long result;
+
+@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+ return result;
+ }
+
+-#define atomic64_cmpxchg(v, o, n) \
+- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
++static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
++ long new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline long atomic64_xchg(atomic64_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
++
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&(v->counter), new);
++}
+
+ /**
+ * atomic64_add_unless - add unless the number is a given value
+@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+ #define atomic64_inc_return(v) atomic64_add_return(1, (v))
++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
+
+ /*
+ * atomic64_sub_and_test - subtract value from variable and test result
+@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * other cases.
+ */
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
++#define atomic64_inc_and_test_unchecked(v) atomic64_add_return_unchecked(1, (v)) == 0)
+
+ /*
+ * atomic64_dec_and_test - decrement by 1 and test
+@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * Atomically increments @v by 1.
+ */
+ #define atomic64_inc(v) atomic64_add(1, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
+
+ /*
+ * atomic64_dec - decrement and test
+@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * Atomically decrements @v by 1.
+ */
+ #define atomic64_dec(v) atomic64_sub(1, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
+
+ /*
+ * atomic64_add_negative - add and test if negative
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
index b4db69f..8f3b093 100644
--- a/arch/mips/include/asm/cache.h
@@ -5742,6 +6584,31 @@ index 202e581..689ca79 100644
#include <asm/processor.h>
/*
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index d1fea7a..45602ea 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -77,17 +77,17 @@ void ack_bad_irq(unsigned int irq)
+ printk("unexpected IRQ # %d\n", irq);
+ }
+
+-atomic_t irq_err_count;
++atomic_unchecked_t irq_err_count;
+
+ int arch_show_interrupts(struct seq_file *p, int prec)
+ {
+- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
++ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
+ return 0;
+ }
+
+ asmlinkage void spurious_interrupt(void)
+ {
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ }
+
+ void __init init_IRQ(void)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c6a041d..b3e7318 100644
--- a/arch/mips/kernel/process.c
@@ -5841,6 +6708,98 @@ index 74f485d..47d2c38 100644
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, trace_a_syscall
+diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
+index 1ff43d5..96fec68 100644
+--- a/arch/mips/kernel/sync-r4k.c
++++ b/arch/mips/kernel/sync-r4k.c
+@@ -21,8 +21,8 @@
+ #include <asm/mipsregs.h>
+
+ static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0);
+-static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
+-static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
++static atomic_unchecked_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
++static atomic_unchecked_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
+ static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
+
+ #define COUNTON 100
+@@ -69,13 +69,13 @@ void __cpuinit synchronise_count_master(int cpu)
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ /* slaves loop on '!= 2' */
+- while (atomic_read(&count_count_start) != 1)
++ while (atomic_read_unchecked(&count_count_start) != 1)
+ mb();
+- atomic_set(&count_count_stop, 0);
++ atomic_set_unchecked(&count_count_stop, 0);
+ smp_wmb();
+
+ /* this lets the slaves write their count register */
+- atomic_inc(&count_count_start);
++ atomic_inc_unchecked(&count_count_start);
+
+ /*
+ * Everyone initialises count in the last loop:
+@@ -86,11 +86,11 @@ void __cpuinit synchronise_count_master(int cpu)
+ /*
+ * Wait for all slaves to leave the synchronization point:
+ */
+- while (atomic_read(&count_count_stop) != 1)
++ while (atomic_read_unchecked(&count_count_stop) != 1)
+ mb();
+- atomic_set(&count_count_start, 0);
++ atomic_set_unchecked(&count_count_start, 0);
+ smp_wmb();
+- atomic_inc(&count_count_stop);
++ atomic_inc_unchecked(&count_count_stop);
+ }
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + COUNTON);
+@@ -131,8 +131,8 @@ void __cpuinit synchronise_count_slave(int cpu)
+ initcount = atomic_read(&count_reference);
+
+ for (i = 0; i < NR_LOOPS; i++) {
+- atomic_inc(&count_count_start);
+- while (atomic_read(&count_count_start) != 2)
++ atomic_inc_unchecked(&count_count_start);
++ while (atomic_read_unchecked(&count_count_start) != 2)
+ mb();
+
+ /*
+@@ -141,8 +141,8 @@ void __cpuinit synchronise_count_slave(int cpu)
+ if (i == NR_LOOPS-1)
+ write_c0_count(initcount);
+
+- atomic_inc(&count_count_stop);
+- while (atomic_read(&count_count_stop) != 2)
++ atomic_inc_unchecked(&count_count_stop);
++ while (atomic_read_unchecked(&count_count_stop) != 2)
+ mb();
+ }
+ /* Arrange for an interrupt in a short while */
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index a75ae40..0d0f56a 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -675,7 +675,17 @@ asmlinkage void do_ov(struct pt_regs *regs)
+ {
+ siginfo_t info;
+
+- die_if_kernel("Integer overflow", regs);
++ if (unlikely(!user_mode(regs))) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (fixup_exception(regs)) {
++ pax_report_refcount_overflow(regs);
++ return;
++ }
++#endif
++
++ die("Integer overflow", regs);
++ }
+
+ info.si_code = FPE_INTOVF;
+ info.si_signo = SIGFPE;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 0fead53..eeb00a6 100644
--- a/arch/mips/mm/fault.c
@@ -5991,6 +6950,31 @@ index 7e5fe27..9656513 100644
int __virt_addr_valid(const volatile void *kaddr)
{
return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
+index a2358b4..7cead4f 100644
+--- a/arch/mips/sgi-ip27/ip27-nmi.c
++++ b/arch/mips/sgi-ip27/ip27-nmi.c
+@@ -187,9 +187,9 @@ void
+ cont_nmi_dump(void)
+ {
+ #ifndef REAL_NMI_SIGNAL
+- static atomic_t nmied_cpus = ATOMIC_INIT(0);
++ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
+
+- atomic_inc(&nmied_cpus);
++ atomic_inc_unchecked(&nmied_cpus);
+ #endif
+ /*
+ * Only allow 1 cpu to proceed
+@@ -233,7 +233,7 @@ cont_nmi_dump(void)
+ udelay(10000);
+ }
+ #else
+- while (atomic_read(&nmied_cpus) != num_online_cpus());
++ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
+ #endif
+
+ /*
diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
index 967d144..db12197 100644
--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
@@ -7645,34 +8629,6 @@ index c4a93d6..4d2a9b4 100644
+#define arch_align_stack(x) ((x) & ~0xfUL)
#endif /* __ASM_EXEC_H */
-diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
-index b75d7d6..6d6d92b 100644
---- a/arch/s390/include/asm/tlb.h
-+++ b/arch/s390/include/asm/tlb.h
-@@ -32,6 +32,7 @@ struct mmu_gather {
- struct mm_struct *mm;
- struct mmu_table_batch *batch;
- unsigned int fullmm;
-+ unsigned long start, end;
- };
-
- struct mmu_table_batch {
-@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
- static inline void tlb_gather_mmu(struct mmu_gather *tlb,
- struct mm_struct *mm,
-- unsigned int full_mm_flush)
-+ unsigned long start,
-+ unsigned long end)
- {
- tlb->mm = mm;
-- tlb->fullmm = full_mm_flush;
-+ tlb->start = start;
-+ tlb->end = end;
-+ tlb->fullmm = !(start | (end+1));
- tlb->batch = NULL;
- if (tlb->fullmm)
- __tlb_flush_mm(mm);
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 9c33ed4..e40cbef 100644
--- a/arch/s390/include/asm/uaccess.h
@@ -7941,25 +8897,6 @@ index ef9e555..331bd29 100644
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
-diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
-index e61d43d..362192e 100644
---- a/arch/sh/include/asm/tlb.h
-+++ b/arch/sh/include/asm/tlb.h
-@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
- }
-
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- tlb->mm = mm;
-- tlb->fullmm = full_mm_flush;
-+ tlb->start = start;
-+ tlb->end = end;
-+ tlb->fullmm = !(start | (end+1));
-
- init_tlb_gather(tlb);
- }
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 03f2b55..b0270327 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -8805,6 +9742,57 @@ index 7ff45e4..a58f271 100644
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 77539ed..3ffffe7 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -868,8 +868,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
+ extern unsigned long xcall_flush_dcache_page_spitfire;
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-extern atomic_t dcpage_flushes;
+-extern atomic_t dcpage_flushes_xcall;
++extern atomic_unchecked_t dcpage_flushes;
++extern atomic_unchecked_t dcpage_flushes_xcall;
+ #endif
+
+ static inline void __local_flush_dcache_page(struct page *page)
+@@ -893,7 +893,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+ return;
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+
+ this_cpu = get_cpu();
+@@ -917,7 +917,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+ xcall_deliver(data0, __pa(pg_addr),
+ (u64) pg_addr, cpumask_of(cpu));
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes_xcall);
++ atomic_inc_unchecked(&dcpage_flushes_xcall);
+ #endif
+ }
+ }
+@@ -936,7 +936,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+ preempt_disable();
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+ data0 = 0;
+ pg_addr = page_address(page);
+@@ -953,7 +953,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+ xcall_deliver(data0, __pa(pg_addr),
+ (u64) pg_addr, cpu_online_mask);
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes_xcall);
++ atomic_inc_unchecked(&dcpage_flushes_xcall);
+ #endif
+ }
+ __local_flush_dcache_page(page);
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 3a8d184..49498a8 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
@@ -9078,7 +10066,7 @@ index 6629829..036032d 100644
}
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
-index b3f833a..ac74b2d 100644
+index b3f833a..f485f80 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
@@ -9148,6 +10136,55 @@ index b3f833a..ac74b2d 100644
}
struct sun4v_error_entry {
+@@ -1830,8 +1841,8 @@ struct sun4v_error_entry {
+ /*0x38*/u64 reserved_5;
+ };
+
+-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+
+ static const char *sun4v_err_type_to_str(u8 type)
+ {
+@@ -1923,7 +1934,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
+ }
+
+ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
+- int cpu, const char *pfx, atomic_t *ocnt)
++ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
+ {
+ u64 *raw_ptr = (u64 *) ent;
+ u32 attrs;
+@@ -1981,8 +1992,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
+
+ show_regs(regs);
+
+- if ((cnt = atomic_read(ocnt)) != 0) {
+- atomic_set(ocnt, 0);
++ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
++ atomic_set_unchecked(ocnt, 0);
+ wmb();
+ printk("%s: Queue overflowed %d times.\n",
+ pfx, cnt);
+@@ -2036,7 +2047,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
+ */
+ void sun4v_resum_overflow(struct pt_regs *regs)
+ {
+- atomic_inc(&sun4v_resum_oflow_cnt);
++ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
+ }
+
+ /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
+@@ -2089,7 +2100,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
+ /* XXX Actually even this can make not that much sense. Perhaps
+ * XXX we should just pull the plug and panic directly from here?
+ */
+- atomic_inc(&sun4v_nonresum_oflow_cnt);
++ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
+ }
+
+ unsigned long sun4v_err_itlb_vaddr;
@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
@@ -10435,6 +11472,44 @@ index d2b5944..d878f3c 100644
}
pte_t *huge_pte_alloc(struct mm_struct *mm,
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 04fd55a..4ede686 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -188,9 +188,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
+ int num_kernel_image_mappings;
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-atomic_t dcpage_flushes = ATOMIC_INIT(0);
++atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
+ #ifdef CONFIG_SMP
+-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
++atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
+ #endif
+ #endif
+
+@@ -198,7 +198,7 @@ inline void flush_dcache_page_impl(struct page *page)
+ {
+ BUG_ON(tlb_type == hypervisor);
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+
+ #ifdef DCACHE_ALIASING_POSSIBLE
+@@ -466,10 +466,10 @@ void mmu_info(struct seq_file *m)
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+ seq_printf(m, "DCPageFlushes\t: %d\n",
+- atomic_read(&dcpage_flushes));
++ atomic_read_unchecked(&dcpage_flushes));
+ #ifdef CONFIG_SMP
+ seq_printf(m, "DCPageFlushesXC\t: %d\n",
+- atomic_read(&dcpage_flushes_xcall));
++ atomic_read_unchecked(&dcpage_flushes_xcall));
+ #endif /* CONFIG_SMP */
+ #endif /* CONFIG_DEBUG_DCFLUSH */
+ }
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index f4500c6..889656c 100644
--- a/arch/tile/include/asm/atomic_64.h
@@ -10595,25 +11670,6 @@ index 0032f92..cd151e0 100644
#ifdef CONFIG_64BIT
#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
-diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
-index 4febacd..29b0301 100644
---- a/arch/um/include/asm/tlb.h
-+++ b/arch/um/include/asm/tlb.h
-@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
- }
-
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- tlb->mm = mm;
-- tlb->fullmm = full_mm_flush;
-+ tlb->start = start;
-+ tlb->end = end;
-+ tlb->fullmm = !(start | (end+1));
-
- init_tlb_gather(tlb);
- }
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index bbcef52..6a2a483 100644
--- a/arch/um/kernel/process.c
@@ -15963,7 +17019,7 @@ index e642300..0ef8f31 100644
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 22224b3..c5d8d7d 100644
+index 22224b3..b3a2f90 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
@@ -16006,7 +17062,39 @@ index 22224b3..c5d8d7d 100644
#endif
#ifdef CONFIG_X86_32
unsigned long ip;
-@@ -823,11 +836,18 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -552,29 +565,8 @@ static inline void load_sp0(struct tss_struct *tss,
+ extern unsigned long mmu_cr4_features;
+ extern u32 *trampoline_cr4_features;
+
+-static inline void set_in_cr4(unsigned long mask)
+-{
+- unsigned long cr4;
+-
+- mmu_cr4_features |= mask;
+- if (trampoline_cr4_features)
+- *trampoline_cr4_features = mmu_cr4_features;
+- cr4 = read_cr4();
+- cr4 |= mask;
+- write_cr4(cr4);
+-}
+-
+-static inline void clear_in_cr4(unsigned long mask)
+-{
+- unsigned long cr4;
+-
+- mmu_cr4_features &= ~mask;
+- if (trampoline_cr4_features)
+- *trampoline_cr4_features = mmu_cr4_features;
+- cr4 = read_cr4();
+- cr4 &= ~mask;
+- write_cr4(cr4);
+-}
++extern void set_in_cr4(unsigned long mask);
++extern void clear_in_cr4(unsigned long mask);
+
+ typedef struct {
+ unsigned long seg;
+@@ -823,11 +815,18 @@ static inline void spin_lock_prefetch(const void *x)
*/
#define TASK_SIZE PAGE_OFFSET
#define TASK_SIZE_MAX TASK_SIZE
@@ -16027,7 +17115,7 @@ index 22224b3..c5d8d7d 100644
.vm86_info = NULL, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
-@@ -841,7 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -841,7 +840,7 @@ static inline void spin_lock_prefetch(const void *x)
*/
#define INIT_TSS { \
.x86_tss = { \
@@ -16036,7 +17124,7 @@ index 22224b3..c5d8d7d 100644
.ss0 = __KERNEL_DS, \
.ss1 = __KERNEL_CS, \
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
-@@ -852,11 +872,7 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -852,11 +851,7 @@ static inline void spin_lock_prefetch(const void *x)
extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
@@ -16049,7 +17137,7 @@ index 22224b3..c5d8d7d 100644
/*
* The below -8 is to reserve 8 bytes on top of the ring0 stack.
-@@ -871,7 +887,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -871,7 +866,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define task_pt_regs(task) \
({ \
struct pt_regs *__regs__; \
@@ -16058,7 +17146,7 @@ index 22224b3..c5d8d7d 100644
__regs__ - 1; \
})
-@@ -881,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -881,13 +876,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
/*
* User space process size. 47bits minus one guard page.
*/
@@ -16074,7 +17162,7 @@ index 22224b3..c5d8d7d 100644
#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-@@ -898,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -898,11 +893,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define STACK_TOP_MAX TASK_SIZE_MAX
#define INIT_THREAD { \
@@ -16088,7 +17176,7 @@ index 22224b3..c5d8d7d 100644
}
/*
-@@ -930,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+@@ -930,6 +925,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
@@ -16099,7 +17187,7 @@ index 22224b3..c5d8d7d 100644
#define KSTK_EIP(task) (task_pt_regs(task)->ip)
/* Get/set a process' ability to use the timestamp counter instruction */
-@@ -942,7 +962,8 @@ extern int set_tsc_mode(unsigned int val);
+@@ -942,7 +941,8 @@ extern int set_tsc_mode(unsigned int val);
extern u16 amd_get_nb_id(int cpu);
struct aperfmperf {
@@ -16109,7 +17197,7 @@ index 22224b3..c5d8d7d 100644
};
static inline void get_aperfmperf(struct aperfmperf *am)
-@@ -970,7 +991,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
+@@ -970,7 +970,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
return ratio;
}
@@ -16118,7 +17206,7 @@ index 22224b3..c5d8d7d 100644
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);
-@@ -980,6 +1001,6 @@ bool xen_set_default_idle(void);
+@@ -980,6 +980,6 @@ bool xen_set_default_idle(void);
#define xen_set_default_idle 0
#endif
@@ -16960,31 +18048,33 @@ index a1df6e8..e002940 100644
#endif
#endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
-index 50a7fc0..d00c622 100644
+index 50a7fc0..45844c0 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
-@@ -17,18 +17,39 @@
+@@ -17,18 +17,44 @@
static inline void __native_flush_tlb(void)
{
-- native_write_cr3(native_read_cr3());
++ if (static_cpu_has(X86_FEATURE_INVPCID)) {
++ unsigned long descriptor[2];
++
++ descriptor[0] = PCID_KERNEL;
++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
++ return;
++ }
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+ if (static_cpu_has(X86_FEATURE_PCID)) {
+ unsigned int cpu = raw_get_cpu();
+
-+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
-+ unsigned long descriptor[2];
-+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
-+ } else {
-+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
-+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
-+ }
++ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
++ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
+ raw_put_cpu_no_resched();
-+ } else
++ return;
++ }
+#endif
+
-+ native_write_cr3(native_read_cr3());
+ native_write_cr3(native_read_cr3());
}
static inline void __native_flush_tlb_global_irq_disabled(void)
@@ -16992,15 +18082,17 @@ index 50a7fc0..d00c622 100644
- unsigned long cr4;
+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
+ unsigned long descriptor[2];
-+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
-+ } else {
-+ unsigned long cr4;
- cr4 = native_read_cr4();
- /* clear PGE */
- native_write_cr4(cr4 & ~X86_CR4_PGE);
- /* write old PGE again and flush TLBs */
- native_write_cr4(cr4);
++ descriptor[0] = PCID_KERNEL;
++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
++ } else {
++ unsigned long cr4;
++
+ cr4 = native_read_cr4();
+ /* clear PGE */
+ native_write_cr4(cr4 & ~X86_CR4_PGE);
@@ -17010,41 +18102,49 @@ index 50a7fc0..d00c622 100644
}
static inline void __native_flush_tlb_global(void)
-@@ -49,7 +70,33 @@ static inline void __native_flush_tlb_global(void)
+@@ -49,6 +75,42 @@ static inline void __native_flush_tlb_global(void)
static inline void __native_flush_tlb_single(unsigned long addr)
{
-- asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ if (static_cpu_has(X86_FEATURE_PCID) && addr < TASK_SIZE_MAX) {
-+ unsigned int cpu = raw_get_cpu();
++ if (static_cpu_has(X86_FEATURE_INVPCID)) {
++ unsigned long descriptor[2];
+
-+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
-+ unsigned long descriptor[2];
-+ descriptor[0] = PCID_USER;
-+ descriptor[1] = addr;
++ descriptor[0] = PCID_KERNEL;
++ descriptor[1] = addr;
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
++ if (addr < TASK_SIZE_MAX)
++ descriptor[1] += pax_user_shadow_base;
+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
-+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
-+ descriptor[0] = PCID_KERNEL;
-+ descriptor[1] = addr + pax_user_shadow_base;
-+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
-+ }
-+ } else {
-+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
-+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
-+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
-+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF))
-+ asm volatile("invlpg (%0)" ::"r" (addr + pax_user_shadow_base) : "memory");
+ }
-+ raw_put_cpu_no_resched();
-+ } else
++
++ descriptor[0] = PCID_USER;
++ descriptor[1] = addr;
+#endif
+
++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
++ return;
++ }
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if (static_cpu_has(X86_FEATURE_PCID)) {
++ unsigned int cpu = raw_get_cpu();
++
++ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
++ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
++ raw_put_cpu_no_resched();
++
++ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
++ addr += pax_user_shadow_base;
++ }
++#endif
++
+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
- static inline void __flush_tlb_all(void)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 5ee2687..74590b9 100644
--- a/arch/x86/include/asm/uaccess.h
@@ -17496,7 +18596,7 @@ index 7f760a9..04b1c65 100644
}
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
-index 142810c..1f2a0a7 100644
+index 142810c..1dbe82f 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -10,6 +10,9 @@
@@ -17815,8 +18915,9 @@ index 142810c..1f2a0a7 100644
}
}
- static __must_check __always_inline int
+-static __must_check __always_inline int
-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
++static __must_check __always_inline unsigned long
+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
{
- return copy_user_generic(dst, (__force const void *)src, size);
@@ -18604,7 +19705,7 @@ index 5013a48..0782c53 100644
if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 22018f7..a5883af 100644
+index 22018f7..df77e23 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
@@ -18668,48 +19769,65 @@ index 22018f7..a5883af 100644
static int __init x86_xsave_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-@@ -288,6 +234,40 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+@@ -288,6 +234,57 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
set_in_cr4(X86_CR4_SMAP);
}
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#ifdef CONFIG_X86_64
+static __init int setup_disable_pcid(char *arg)
+{
+ setup_clear_cpu_cap(X86_FEATURE_PCID);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
+ if (clone_pgd_mask != ~(pgdval_t)0UL)
+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++#endif
++
+ return 1;
+}
+__setup("nopcid", setup_disable_pcid);
+
+static void setup_pcid(struct cpuinfo_x86 *c)
+{
-+ if (cpu_has(c, X86_FEATURE_PCID))
-+ printk("PAX: PCID detected\n");
++ if (!cpu_has(c, X86_FEATURE_PCID)) {
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (clone_pgd_mask != ~(pgdval_t)0UL) {
++ pax_open_kernel();
++ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++ pax_close_kernel();
++ printk("PAX: slow and weak UDEREF enabled\n");
++ } else
++ printk("PAX: UDEREF disabled\n");
++#endif
++
++ return;
++ }
++
++ printk("PAX: PCID detected\n");
++ set_in_cr4(X86_CR4_PCIDE);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_open_kernel();
++ clone_pgd_mask = ~(pgdval_t)0UL;
++ pax_close_kernel();
++ if (pax_user_shadow_base)
++ printk("PAX: weak UDEREF enabled\n");
++ else {
++ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
++ printk("PAX: strong UDEREF enabled\n");
++ }
++#endif
+
+ if (cpu_has(c, X86_FEATURE_INVPCID))
+ printk("PAX: INVPCID detected\n");
-+
-+ if (cpu_has(c, X86_FEATURE_PCID)) {
-+ set_in_cr4(X86_CR4_PCIDE);
-+ clone_pgd_mask = ~(pgdval_t)0UL;
-+ if (pax_user_shadow_base)
-+ printk("PAX: weak UDEREF enabled\n");
-+ else {
-+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
-+ printk("PAX: strong UDEREF enabled\n");
-+ }
-+ } else if (pax_user_shadow_base)
-+ printk("PAX: slow and weak UDEREF enabled\n");
-+ else
-+ printk("PAX: UDEREF disabled\n");
+}
+#endif
+
/*
* Some CPU features depend on higher CPUID levels, which may not always
* be available due to CPUID level capping or broken virtualization
-@@ -386,7 +366,7 @@ void switch_to_new_gdt(int cpu)
+@@ -386,7 +383,7 @@ void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
@@ -18718,18 +19836,18 @@ index 22018f7..a5883af 100644
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
/* Reload the per-cpu base */
-@@ -874,6 +854,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+@@ -874,6 +871,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c);
setup_smap(c);
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#ifdef CONFIG_X86_64
+ setup_pcid(c);
+#endif
+
/*
* The vendor-specific functions might have changed features.
* Now we do "generic changes."
-@@ -882,6 +866,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+@@ -882,6 +883,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
/* Filter out anything that depends on CPUID levels we don't have */
filter_cpuid_features(c, true);
@@ -18740,7 +19858,7 @@ index 22018f7..a5883af 100644
/* If the model name is still unset, do table lookup. */
if (!c->x86_model_id[0]) {
const char *p;
-@@ -1069,10 +1057,12 @@ static __init int setup_disablecpuid(char *arg)
+@@ -1069,10 +1074,12 @@ static __init int setup_disablecpuid(char *arg)
}
__setup("clearcpuid=", setup_disablecpuid);
@@ -18755,7 +19873,7 @@ index 22018f7..a5883af 100644
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE);
-@@ -1086,7 +1076,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
+@@ -1086,7 +1093,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(unsigned long, kernel_stack) =
@@ -18764,7 +19882,7 @@ index 22018f7..a5883af 100644
EXPORT_PER_CPU_SYMBOL(kernel_stack);
DEFINE_PER_CPU(char *, irq_stack_ptr) =
-@@ -1231,7 +1221,7 @@ void __cpuinit cpu_init(void)
+@@ -1231,7 +1238,7 @@ void __cpuinit cpu_init(void)
load_ucode_ap();
cpu = stack_smp_processor_id();
@@ -18773,7 +19891,7 @@ index 22018f7..a5883af 100644
oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
-@@ -1257,7 +1247,7 @@ void __cpuinit cpu_init(void)
+@@ -1257,7 +1264,7 @@ void __cpuinit cpu_init(void)
switch_to_new_gdt(cpu);
loadsegment(fs, 0);
@@ -18782,7 +19900,7 @@ index 22018f7..a5883af 100644
memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
syscall_init();
-@@ -1266,7 +1256,6 @@ void __cpuinit cpu_init(void)
+@@ -1266,7 +1273,6 @@ void __cpuinit cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();
@@ -18790,7 +19908,7 @@ index 22018f7..a5883af 100644
enable_x2apic();
/*
-@@ -1318,7 +1307,7 @@ void __cpuinit cpu_init(void)
+@@ -1318,7 +1324,7 @@ void __cpuinit cpu_init(void)
{
int cpu = smp_processor_id();
struct task_struct *curr = current;
@@ -19193,7 +20311,7 @@ index a9e2207..d70c83a 100644
intel_ds_init();
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
-index 52441a2..f94fae8 100644
+index 8aac56b..588fb13 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -3093,7 +3093,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
@@ -20461,7 +21579,7 @@ index 8f3e2de..6b71e39 100644
/*
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 7272089..833fdf8 100644
+index 7272089..0b74104 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -59,6 +59,8 @@
@@ -20548,7 +21666,7 @@ index 7272089..833fdf8 100644
#endif
-@@ -284,6 +293,427 @@ ENTRY(native_usergs_sysret64)
+@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
@@ -20738,10 +21856,11 @@ index 7272089..833fdf8 100644
+ .popsection
+ GET_CR3_INTO_RDI
+ cmp $1,%dil
-+ jnz 3f
++ jnz 4f
+ sub $4097,%rdi
+ bts $63,%rdi
-+ jmp 2f
++ SET_RDI_INTO_CR3
++ jmp 3f
+111:
+
+ GET_CR3_INTO_RDI
@@ -20772,13 +21891,15 @@ index 7272089..833fdf8 100644
+ i = i + 1
+ .endr
+
++2: SET_RDI_INTO_CR3
++
+#ifdef CONFIG_PAX_KERNEXEC
+ GET_CR0_INTO_RDI
+ bts $16,%rdi
+ SET_RDI_INTO_CR0
+#endif
+
-+2: SET_RDI_INTO_CR3
++3:
+
+#ifdef CONFIG_PARAVIRT
+ PV_RESTORE_REGS(CLBR_RDI)
@@ -20788,7 +21909,7 @@ index 7272089..833fdf8 100644
+ popq %rdi
+ pax_force_retaddr
+ retq
-+3: ud2
++4: ud2
+ENDPROC(pax_enter_kernel_user)
+
+ENTRY(pax_exit_kernel_user)
@@ -20814,14 +21935,22 @@ index 7272089..833fdf8 100644
+ SET_RDI_INTO_CR3
+ jmp 2f
+1:
++
+ mov %rdi,%rbx
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ btr $16,%rdi
++ jnc 3f
++ SET_RDI_INTO_CR0
++#endif
++
+ add $__START_KERNEL_map,%rbx
+ sub phys_base(%rip),%rbx
+
+#ifdef CONFIG_PARAVIRT
+ cmpl $0, pv_info+PARAVIRT_enabled
+ jz 1f
-+ pushq %rdi
+ i = 0
+ .rept USER_PGD_PTRS
+ mov i*8(%rbx),%rsi
@@ -20830,18 +21959,10 @@ index 7272089..833fdf8 100644
+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
+ i = i + 1
+ .endr
-+ popq %rdi
+ jmp 2f
+1:
+#endif
+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ GET_CR0_INTO_RDI
-+ btr $16,%rdi
-+ jnc 3f
-+ SET_RDI_INTO_CR0
-+#endif
-+
+ i = 0
+ .rept USER_PGD_PTRS
+ movb $0x67,i*8(%rbx)
@@ -20976,7 +22097,7 @@ index 7272089..833fdf8 100644
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
-@@ -375,8 +805,8 @@ ENDPROC(native_usergs_sysret64)
+@@ -375,8 +808,8 @@ ENDPROC(native_usergs_sysret64)
.endm
.macro UNFAKE_STACK_FRAME
@@ -20987,7 +22108,7 @@ index 7272089..833fdf8 100644
.endm
/*
-@@ -463,7 +893,7 @@ ENDPROC(native_usergs_sysret64)
+@@ -463,7 +896,7 @@ ENDPROC(native_usergs_sysret64)
movq %rsp, %rsi
leaq -RBP(%rsp),%rdi /* arg1 for handler */
@@ -20996,7 +22117,7 @@ index 7272089..833fdf8 100644
je 1f
SWAPGS
/*
-@@ -498,9 +928,10 @@ ENTRY(save_rest)
+@@ -498,9 +931,10 @@ ENTRY(save_rest)
movq_cfi r15, R15+16
movq %r11, 8(%rsp) /* return address */
FIXUP_TOP_OF_STACK %r11, 16
@@ -21008,7 +22129,7 @@ index 7272089..833fdf8 100644
/* save complete stack frame */
.pushsection .kprobes.text, "ax"
-@@ -529,9 +960,10 @@ ENTRY(save_paranoid)
+@@ -529,9 +963,10 @@ ENTRY(save_paranoid)
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx,%ebx
@@ -21021,7 +22142,7 @@ index 7272089..833fdf8 100644
.popsection
/*
-@@ -553,7 +985,7 @@ ENTRY(ret_from_fork)
+@@ -553,7 +988,7 @@ ENTRY(ret_from_fork)
RESTORE_REST
@@ -21030,7 +22151,7 @@ index 7272089..833fdf8 100644
jz 1f
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
-@@ -571,7 +1003,7 @@ ENTRY(ret_from_fork)
+@@ -571,7 +1006,7 @@ ENTRY(ret_from_fork)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -21039,7 +22160,7 @@ index 7272089..833fdf8 100644
/*
* System call entry. Up to 6 arguments in registers are supported.
-@@ -608,7 +1040,7 @@ END(ret_from_fork)
+@@ -608,7 +1043,7 @@ END(ret_from_fork)
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
@@ -21048,7 +22169,7 @@ index 7272089..833fdf8 100644
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
-@@ -621,16 +1053,23 @@ GLOBAL(system_call_after_swapgs)
+@@ -621,16 +1056,23 @@ GLOBAL(system_call_after_swapgs)
movq %rsp,PER_CPU_VAR(old_rsp)
movq PER_CPU_VAR(kernel_stack),%rsp
@@ -21074,7 +22195,7 @@ index 7272089..833fdf8 100644
jnz tracesys
system_call_fastpath:
#if __SYSCALL_MASK == ~0
-@@ -640,7 +1079,7 @@ system_call_fastpath:
+@@ -640,7 +1082,7 @@ system_call_fastpath:
cmpl $__NR_syscall_max,%eax
#endif
ja badsys
@@ -21083,7 +22204,7 @@ index 7272089..833fdf8 100644
call *sys_call_table(,%rax,8) # XXX: rip relative
movq %rax,RAX-ARGOFFSET(%rsp)
/*
-@@ -654,10 +1093,13 @@ sysret_check:
+@@ -654,10 +1096,13 @@ sysret_check:
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -21098,7 +22219,7 @@ index 7272089..833fdf8 100644
/*
* sysretq will re-enable interrupts:
*/
-@@ -709,14 +1151,18 @@ badsys:
+@@ -709,14 +1154,18 @@ badsys:
* jump back to the normal fast path.
*/
auditsys:
@@ -21118,7 +22239,7 @@ index 7272089..833fdf8 100644
jmp system_call_fastpath
/*
-@@ -737,7 +1183,7 @@ sysret_audit:
+@@ -737,7 +1186,7 @@ sysret_audit:
/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -21127,7 +22248,7 @@ index 7272089..833fdf8 100644
jz auditsys
#endif
SAVE_REST
-@@ -745,12 +1191,16 @@ tracesys:
+@@ -745,12 +1194,16 @@ tracesys:
FIXUP_TOP_OF_STACK %rdi
movq %rsp,%rdi
call syscall_trace_enter
@@ -21144,7 +22265,7 @@ index 7272089..833fdf8 100644
RESTORE_REST
#if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max,%rax
-@@ -759,7 +1209,7 @@ tracesys:
+@@ -759,7 +1212,7 @@ tracesys:
cmpl $__NR_syscall_max,%eax
#endif
ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
@@ -21153,7 +22274,7 @@ index 7272089..833fdf8 100644
call *sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
/* Use IRET because user could have changed frame */
-@@ -780,7 +1230,9 @@ GLOBAL(int_with_check)
+@@ -780,7 +1233,9 @@ GLOBAL(int_with_check)
andl %edi,%edx
jnz int_careful
andl $~TS_COMPAT,TI_status(%rcx)
@@ -21164,7 +22285,7 @@ index 7272089..833fdf8 100644
/* Either reschedule or signal or syscall exit tracking needed. */
/* First do a reschedule test. */
-@@ -826,7 +1278,7 @@ int_restore_rest:
+@@ -826,7 +1281,7 @@ int_restore_rest:
TRACE_IRQS_OFF
jmp int_with_check
CFI_ENDPROC
@@ -21173,7 +22294,7 @@ index 7272089..833fdf8 100644
.macro FORK_LIKE func
ENTRY(stub_\func)
-@@ -839,9 +1291,10 @@ ENTRY(stub_\func)
+@@ -839,9 +1294,10 @@ ENTRY(stub_\func)
DEFAULT_FRAME 0 8 /* offset 8: return address */
call sys_\func
RESTORE_TOP_OF_STACK %r11, 8
@@ -21185,7 +22306,7 @@ index 7272089..833fdf8 100644
.endm
.macro FIXED_FRAME label,func
-@@ -851,9 +1304,10 @@ ENTRY(\label)
+@@ -851,9 +1307,10 @@ ENTRY(\label)
FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
call \func
RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
@@ -21197,7 +22318,7 @@ index 7272089..833fdf8 100644
.endm
FORK_LIKE clone
-@@ -870,9 +1324,10 @@ ENTRY(ptregscall_common)
+@@ -870,9 +1327,10 @@ ENTRY(ptregscall_common)
movq_cfi_restore R12+8, r12
movq_cfi_restore RBP+8, rbp
movq_cfi_restore RBX+8, rbx
@@ -21209,7 +22330,7 @@ index 7272089..833fdf8 100644
ENTRY(stub_execve)
CFI_STARTPROC
-@@ -885,7 +1340,7 @@ ENTRY(stub_execve)
+@@ -885,7 +1343,7 @@ ENTRY(stub_execve)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -21218,7 +22339,7 @@ index 7272089..833fdf8 100644
/*
* sigreturn is special because it needs to restore all registers on return.
-@@ -902,7 +1357,7 @@ ENTRY(stub_rt_sigreturn)
+@@ -902,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -21227,7 +22348,7 @@ index 7272089..833fdf8 100644
#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
-@@ -916,7 +1371,7 @@ ENTRY(stub_x32_rt_sigreturn)
+@@ -916,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -21236,7 +22357,7 @@ index 7272089..833fdf8 100644
ENTRY(stub_x32_execve)
CFI_STARTPROC
-@@ -930,7 +1385,7 @@ ENTRY(stub_x32_execve)
+@@ -930,7 +1388,7 @@ ENTRY(stub_x32_execve)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -21245,7 +22366,7 @@ index 7272089..833fdf8 100644
#endif
-@@ -967,7 +1422,7 @@ vector=vector+1
+@@ -967,7 +1425,7 @@ vector=vector+1
2: jmp common_interrupt
.endr
CFI_ENDPROC
@@ -21254,7 +22375,7 @@ index 7272089..833fdf8 100644
.previous
END(interrupt)
-@@ -987,6 +1442,16 @@ END(interrupt)
+@@ -987,6 +1445,16 @@ END(interrupt)
subq $ORIG_RAX-RBP, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
SAVE_ARGS_IRQ
@@ -21271,7 +22392,7 @@ index 7272089..833fdf8 100644
call \func
.endm
-@@ -1019,7 +1484,7 @@ ret_from_intr:
+@@ -1019,7 +1487,7 @@ ret_from_intr:
exit_intr:
GET_THREAD_INFO(%rcx)
@@ -21280,7 +22401,7 @@ index 7272089..833fdf8 100644
je retint_kernel
/* Interrupt came from user space */
-@@ -1041,12 +1506,16 @@ retint_swapgs: /* return to user-space */
+@@ -1041,12 +1509,16 @@ retint_swapgs: /* return to user-space */
* The iretq could re-enable interrupts:
*/
DISABLE_INTERRUPTS(CLBR_ANY)
@@ -21297,7 +22418,7 @@ index 7272089..833fdf8 100644
/*
* The iretq could re-enable interrupts:
*/
-@@ -1129,7 +1598,7 @@ ENTRY(retint_kernel)
+@@ -1129,7 +1601,7 @@ ENTRY(retint_kernel)
#endif
CFI_ENDPROC
@@ -21306,7 +22427,7 @@ index 7272089..833fdf8 100644
/*
* End of kprobes section
*/
-@@ -1147,7 +1616,7 @@ ENTRY(\sym)
+@@ -1147,7 +1619,7 @@ ENTRY(\sym)
interrupt \do_sym
jmp ret_from_intr
CFI_ENDPROC
@@ -21315,7 +22436,7 @@ index 7272089..833fdf8 100644
.endm
#ifdef CONFIG_SMP
-@@ -1208,12 +1677,22 @@ ENTRY(\sym)
+@@ -1208,12 +1680,22 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
@@ -21339,7 +22460,7 @@ index 7272089..833fdf8 100644
.endm
.macro paranoidzeroentry sym do_sym
-@@ -1226,15 +1705,25 @@ ENTRY(\sym)
+@@ -1226,15 +1708,25 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF
@@ -21367,7 +22488,7 @@ index 7272089..833fdf8 100644
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
INTR_FRAME
-@@ -1245,14 +1734,30 @@ ENTRY(\sym)
+@@ -1245,14 +1737,30 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF_DEBUG
@@ -21399,7 +22520,7 @@ index 7272089..833fdf8 100644
.endm
.macro errorentry sym do_sym
-@@ -1264,13 +1769,23 @@ ENTRY(\sym)
+@@ -1264,13 +1772,23 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
@@ -21424,7 +22545,7 @@ index 7272089..833fdf8 100644
.endm
/* error code is on the stack already */
-@@ -1284,13 +1799,23 @@ ENTRY(\sym)
+@@ -1284,13 +1802,23 @@ ENTRY(\sym)
call save_paranoid
DEFAULT_FRAME 0
TRACE_IRQS_OFF
@@ -21449,7 +22570,7 @@ index 7272089..833fdf8 100644
.endm
zeroentry divide_error do_divide_error
-@@ -1320,9 +1845,10 @@ gs_change:
+@@ -1320,9 +1848,10 @@ gs_change:
2: mfence /* workaround */
SWAPGS
popfq_cfi
@@ -21461,7 +22582,7 @@ index 7272089..833fdf8 100644
_ASM_EXTABLE(gs_change,bad_gs)
.section .fixup,"ax"
-@@ -1350,9 +1876,10 @@ ENTRY(call_softirq)
+@@ -1350,9 +1879,10 @@ ENTRY(call_softirq)
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count)
@@ -21473,7 +22594,7 @@ index 7272089..833fdf8 100644
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
-@@ -1390,7 +1917,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+@@ -1390,7 +1920,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
decl PER_CPU_VAR(irq_count)
jmp error_exit
CFI_ENDPROC
@@ -21482,7 +22603,7 @@ index 7272089..833fdf8 100644
/*
* Hypervisor uses this for application faults while it executes.
-@@ -1449,7 +1976,7 @@ ENTRY(xen_failsafe_callback)
+@@ -1449,7 +1979,7 @@ ENTRY(xen_failsafe_callback)
SAVE_ALL
jmp error_exit
CFI_ENDPROC
@@ -21491,7 +22612,7 @@ index 7272089..833fdf8 100644
apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -1501,18 +2028,33 @@ ENTRY(paranoid_exit)
+@@ -1501,18 +2031,33 @@ ENTRY(paranoid_exit)
DEFAULT_FRAME
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF_DEBUG
@@ -21527,7 +22648,7 @@ index 7272089..833fdf8 100644
jmp irq_return
paranoid_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1541,7 +2083,7 @@ paranoid_schedule:
+@@ -1541,7 +2086,7 @@ paranoid_schedule:
TRACE_IRQS_OFF
jmp paranoid_userspace
CFI_ENDPROC
@@ -21536,7 +22657,7 @@ index 7272089..833fdf8 100644
/*
* Exception entry point. This expects an error code/orig_rax on the stack.
-@@ -1568,12 +2110,13 @@ ENTRY(error_entry)
+@@ -1568,12 +2113,13 @@ ENTRY(error_entry)
movq_cfi r14, R14+8
movq_cfi r15, R15+8
xorl %ebx,%ebx
@@ -21551,7 +22672,7 @@ index 7272089..833fdf8 100644
ret
/*
-@@ -1600,7 +2143,7 @@ bstep_iret:
+@@ -1600,7 +2146,7 @@ bstep_iret:
movq %rcx,RIP+8(%rsp)
jmp error_swapgs
CFI_ENDPROC
@@ -21560,7 +22681,7 @@ index 7272089..833fdf8 100644
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
-@@ -1611,7 +2154,7 @@ ENTRY(error_exit)
+@@ -1611,7 +2157,7 @@ ENTRY(error_exit)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
GET_THREAD_INFO(%rcx)
@@ -21569,7 +22690,7 @@ index 7272089..833fdf8 100644
jne retint_kernel
LOCKDEP_SYS_EXIT_IRQ
movl TI_flags(%rcx),%edx
-@@ -1620,7 +2163,7 @@ ENTRY(error_exit)
+@@ -1620,7 +2166,7 @@ ENTRY(error_exit)
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
@@ -21578,7 +22699,7 @@ index 7272089..833fdf8 100644
/*
* Test if a given stack is an NMI stack or not.
-@@ -1678,9 +2221,11 @@ ENTRY(nmi)
+@@ -1678,9 +2224,11 @@ ENTRY(nmi)
* If %cs was not the kernel segment, then the NMI triggered in user
* space, which means it is definitely not nested.
*/
@@ -21591,7 +22712,7 @@ index 7272089..833fdf8 100644
/*
* Check the special variable on the stack to see if NMIs are
* executing.
-@@ -1714,8 +2259,7 @@ nested_nmi:
+@@ -1714,8 +2262,7 @@ nested_nmi:
1:
/* Set up the interrupted NMIs stack to jump to repeat_nmi */
@@ -21601,7 +22722,7 @@ index 7272089..833fdf8 100644
CFI_ADJUST_CFA_OFFSET 1*8
leaq -10*8(%rsp), %rdx
pushq_cfi $__KERNEL_DS
-@@ -1733,6 +2277,7 @@ nested_nmi_out:
+@@ -1733,6 +2280,7 @@ nested_nmi_out:
CFI_RESTORE rdx
/* No need to check faults here */
@@ -21609,7 +22730,7 @@ index 7272089..833fdf8 100644
INTERRUPT_RETURN
CFI_RESTORE_STATE
-@@ -1849,6 +2394,8 @@ end_repeat_nmi:
+@@ -1849,6 +2397,8 @@ end_repeat_nmi:
*/
movq %cr2, %r12
@@ -21618,7 +22739,7 @@ index 7272089..833fdf8 100644
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
movq $-1,%rsi
-@@ -1861,26 +2408,31 @@ end_repeat_nmi:
+@@ -1861,26 +2411,31 @@ end_repeat_nmi:
movq %r12, %cr2
1:
@@ -22473,7 +23594,7 @@ index a836860..1b5c665 100644
- .skip PAGE_SIZE
+ .fill 512,8,0
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
-index 0fa6912..37fce70 100644
+index 0fa6912..b37438b 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
@@ -22489,7 +23610,7 @@ index 0fa6912..37fce70 100644
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
-@@ -37,3 +41,7 @@ EXPORT_SYMBOL(strstr);
+@@ -37,3 +41,11 @@ EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(empty_zero_page);
@@ -22497,6 +23618,10 @@ index 0fa6912..37fce70 100644
+#ifdef CONFIG_PAX_KERNEXEC
+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
+#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++EXPORT_SYMBOL(cpu_pgd);
++#endif
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index f7ea30d..6318acc 100644
--- a/arch/x86/kernel/i387.c
@@ -24248,7 +25373,7 @@ index f2bb9c9..bed145d7 100644
1:
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index 56f7fcf..3b88ad1 100644
+index 56f7fcf..2cfe4f1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -110,6 +110,7 @@
@@ -24259,7 +25384,7 @@ index 56f7fcf..3b88ad1 100644
/*
* max_low_pfn_mapped: highest direct mapped pfn under 4GB
-@@ -205,10 +206,12 @@ EXPORT_SYMBOL(boot_cpu_data);
+@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
#endif
@@ -24274,8 +25399,46 @@ index 56f7fcf..3b88ad1 100644
+unsigned long mmu_cr4_features __read_only;
#endif
++void set_in_cr4(unsigned long mask)
++{
++ unsigned long cr4 = read_cr4();
++
++ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
++ return;
++
++ pax_open_kernel();
++ mmu_cr4_features |= mask;
++ pax_close_kernel();
++
++ if (trampoline_cr4_features)
++ *trampoline_cr4_features = mmu_cr4_features;
++ cr4 |= mask;
++ write_cr4(cr4);
++}
++EXPORT_SYMBOL(set_in_cr4);
++
++void clear_in_cr4(unsigned long mask)
++{
++ unsigned long cr4 = read_cr4();
++
++ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
++ return;
++
++ pax_open_kernel();
++ mmu_cr4_features &= ~mask;
++ pax_close_kernel();
++
++ if (trampoline_cr4_features)
++ *trampoline_cr4_features = mmu_cr4_features;
++ cr4 &= ~mask;
++ write_cr4(cr4);
++}
++EXPORT_SYMBOL(clear_in_cr4);
++
/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
-@@ -444,7 +447,7 @@ static void __init parse_setup_data(void)
+ int bootloader_type, bootloader_version;
+
+@@ -444,7 +483,7 @@ static void __init parse_setup_data(void)
switch (data->type) {
case SETUP_E820_EXT:
@@ -24284,7 +25447,7 @@ index 56f7fcf..3b88ad1 100644
break;
case SETUP_DTB:
add_dtb(pa_data);
-@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
+@@ -771,7 +810,7 @@ static void __init trim_bios_range(void)
* area (640->1Mb) as ram even though it is not.
* take them out.
*/
@@ -24293,7 +25456,7 @@ index 56f7fcf..3b88ad1 100644
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
-@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
+@@ -779,7 +818,7 @@ static void __init trim_bios_range(void)
/* called before trim_bios_range() to spare extra sanitize */
static void __init e820_add_kernel_range(void)
{
@@ -24302,7 +25465,7 @@ index 56f7fcf..3b88ad1 100644
u64 size = __pa_symbol(_end) - start;
/*
-@@ -841,8 +844,12 @@ static void __init trim_low_memory_range(void)
+@@ -841,8 +880,12 @@ static void __init trim_low_memory_range(void)
void __init setup_arch(char **cmdline_p)
{
@@ -24315,7 +25478,7 @@ index 56f7fcf..3b88ad1 100644
early_reserve_initrd();
-@@ -934,14 +941,14 @@ void __init setup_arch(char **cmdline_p)
+@@ -934,14 +977,14 @@ void __init setup_arch(char **cmdline_p)
if (!boot_params.hdr.root_flags)
root_mountflags &= ~MS_RDONLY;
@@ -24516,7 +25679,7 @@ index 48d2b7d..90d328a 100644
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
-index bfd348e..f0c1bf2 100644
+index bfd348e..914f323 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -251,14 +251,18 @@ notrace static void __cpuinit start_secondary(void *unused)
@@ -24569,7 +25732,7 @@ index bfd348e..f0c1bf2 100644
initial_code = (unsigned long)start_secondary;
stack_start = idle->thread.sp;
-@@ -908,6 +915,18 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+@@ -908,6 +915,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
/* the FPU context is blank, nobody can own it */
__cpu_disable_lazy_restore(cpu);
@@ -24582,9 +25745,6 @@ index bfd348e..f0c1bf2 100644
+ KERNEL_PGD_PTRS);
+#endif
+
-+ /* the FPU context is blank, nobody can own it */
-+ __cpu_disable_lazy_restore(cpu);
-+
err = do_boot_cpu(apicid, cpu, tidle);
if (err) {
pr_debug("do_boot_cpu failed %d\n", err);
@@ -24821,7 +25981,7 @@ index 0000000..5877189
+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+}
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
-index dbded5a..ace2781 100644
+index 30277e2..5664a29 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -81,8 +81,8 @@ out:
@@ -24839,8 +25999,8 @@ index dbded5a..ace2781 100644
*begin = new_begin;
}
} else {
-- *begin = TASK_UNMAPPED_BASE;
-+ *begin = mm->mmap_base;
+- *begin = current->mm->mmap_legacy_base;
++ *begin = mm->mmap_legacy_base;
*end = TASK_SIZE;
}
}
@@ -25600,7 +26760,7 @@ index 9a907a6..f83f921 100644
(unsigned long)VSYSCALL_START);
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
-index b014d94..6d6ca7b 100644
+index b014d94..e775258 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
@@ -25612,6 +26772,14 @@ index b014d94..6d6ca7b 100644
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
+@@ -66,3 +64,7 @@ EXPORT_SYMBOL(empty_zero_page);
+ #ifndef CONFIG_PARAVIRT
+ EXPORT_SYMBOL(native_load_gs_index);
+ #endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++EXPORT_SYMBOL(cpu_pgd);
++#endif
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 45a14db..075bb9b 100644
--- a/arch/x86/kernel/x86_init.c
@@ -30647,7 +31815,7 @@ index d87dd6d..bf3fa66 100644
pte = kmemcheck_pte_lookup(address);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
-index 845df68..1d8d29f 100644
+index 5c1ae28..45f4ac9 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
@@ -30705,32 +31873,24 @@ index 845df68..1d8d29f 100644
return TASK_UNMAPPED_BASE + mmap_rnd();
}
-@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
+@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
+ */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
- if (mmap_is_legacy()) {
-- mm->mmap_base = mmap_legacy_base();
-+ mm->mmap_base = mmap_legacy_base(mm);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
-- mm->mmap_base = mmap_base();
-+ mm->mmap_base = mmap_base(mm);
+- mm->mmap_legacy_base = mmap_legacy_base();
+- mm->mmap_base = mmap_base();
++ mm->mmap_legacy_base = mmap_legacy_base(mm);
++ mm->mmap_base = mmap_base(mm);
+
+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++ if (mm->pax_flags & MF_PAX_RANDMMAP) {
++ mm->mmap_legacy_base += mm->delta_mmap;
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++ }
+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
- }
+
+ if (mmap_is_legacy()) {
+ mm->mmap_base = mm->mmap_legacy_base;
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index dc0b727..f612039 100644
--- a/arch/x86/mm/mmio-mod.c
@@ -33363,7 +34523,7 @@ index fdc3ba2..3daee39 100644
.alloc_pud = xen_alloc_pmd_init,
.release_pud = xen_release_pmd_init,
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
-index d99cae8..18401e1 100644
+index a1e58e1..9392ad8 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -240,11 +240,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
@@ -33544,6 +34704,28 @@ index af00795..2bb8105 100644
#define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
#define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index e8918ff..b3ffc51 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -825,7 +825,7 @@ static void blkcg_css_free(struct cgroup *cgroup)
+
+ static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
+ {
+- static atomic64_t id_seq = ATOMIC64_INIT(0);
++ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
+ struct blkcg *blkcg;
+ struct cgroup *parent = cgroup->parent;
+
+@@ -840,7 +840,7 @@ static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
+
+ blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
+ blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
+- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
++ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
+ done:
+ spin_lock_init(&blkcg->lock);
+ INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 58916af..eb9dbcf6 100644
--- a/block/blk-iopoll.c
@@ -33837,6 +35019,28 @@ index 33dc6a0..4b24b47 100644
}
EXPORT_SYMBOL_GPL(cper_next_record_id);
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index fcd7d91..6b2f1a3 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -468,7 +468,7 @@ static void __ghes_print_estatus(const char *pfx,
+ const struct acpi_hest_generic *generic,
+ const struct acpi_hest_generic_status *estatus)
+ {
+- static atomic_t seqno;
++ static atomic_unchecked_t seqno;
+ unsigned int curr_seqno;
+ char pfx_seq[64];
+
+@@ -479,7 +479,7 @@ static void __ghes_print_estatus(const char *pfx,
+ else
+ pfx = KERN_ERR;
+ }
+- curr_seqno = atomic_inc_return(&seqno);
++ curr_seqno = atomic_inc_return_unchecked(&seqno);
+ snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
+ printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
+ pfx_seq, generic->header.source_id);
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index be60399..778b33e8 100644
--- a/drivers/acpi/bgrt.c
@@ -33974,9 +35178,18 @@ index 7b9bdd8..37638ca 100644
unsigned long timeout_msec)
{
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index adf002a..39bb8f9 100644
+index adf002a..06c46a7 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
+@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
+ static void ata_dev_xfermask(struct ata_device *dev);
+ static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
+
+-atomic_t ata_print_id = ATOMIC_INIT(0);
++atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
+
+ struct ata_force_param {
+ const char *name;
@@ -4792,7 +4792,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
struct ata_port *ap;
unsigned int tag;
@@ -34014,6 +35227,41 @@ index adf002a..39bb8f9 100644
spin_unlock(&lock);
}
+@@ -6133,7 +6135,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
+
+ /* give ports names and add SCSI hosts */
+ for (i = 0; i < host->n_ports; i++)
+- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
++ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
+
+
+ /* Create associated sysfs transport objects */
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 0101af5..c70c325 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -4105,7 +4105,7 @@ int ata_sas_port_init(struct ata_port *ap)
+
+ if (rc)
+ return rc;
+- ap->print_id = atomic_inc_return(&ata_print_id);
++ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(ata_sas_port_init);
+diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
+index 577d902b..cb4781e 100644
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -53,7 +53,7 @@ enum {
+ ATA_DNXFER_QUIET = (1 << 31),
+ };
+
+-extern atomic_t ata_print_id;
++extern atomic_unchecked_t ata_print_id;
+ extern int atapi_passthru16;
+ extern int libata_fua;
+ extern int libata_noacpi;
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 7638121..357a965 100644
--- a/drivers/ata/pata_arasan_cf.c
@@ -35542,6 +36790,28 @@ index a5dca6a..bb27967 100644
kfree(tconn->current_epoch);
idr_destroy(&tconn->volumes);
+diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
+index 9e3f441..4044d47 100644
+--- a/drivers/block/drbd/drbd_nl.c
++++ b/drivers/block/drbd/drbd_nl.c
+@@ -3339,7 +3339,7 @@ out:
+
+ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
+ {
+- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
++ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
+ struct sk_buff *msg;
+ struct drbd_genlmsghdr *d_out;
+ unsigned seq;
+@@ -3352,7 +3352,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
+ return;
+ }
+
+- seq = atomic_inc_return(&drbd_genl_seq);
++ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
+ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+ if (!msg)
+ goto failed;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 4222aff..1f79506 100644
--- a/drivers/block/drbd/drbd_receiver.c
@@ -36781,6 +38051,28 @@ index 428754a..8bdf9cc 100644
.attrs = cpuidle_default_attrs,
.name = "cpuidle",
};
+diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
+index ebf130e..e32d8a9 100644
+--- a/drivers/crypto/hifn_795x.c
++++ b/drivers/crypto/hifn_795x.c
+@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
+ MODULE_PARM_DESC(hifn_pll_ref,
+ "PLL reference clock (pci[freq] or ext[freq], default ext)");
+
+-static atomic_t hifn_dev_number;
++static atomic_unchecked_t hifn_dev_number;
+
+ #define ACRYPTO_OP_DECRYPT 0
+ #define ACRYPTO_OP_ENCRYPT 1
+@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ goto err_out_disable_pci_device;
+
+ snprintf(name, sizeof(name), "hifn%d",
+- atomic_inc_return(&hifn_dev_number)-1);
++ atomic_inc_return_unchecked(&hifn_dev_number)-1);
+
+ err = pci_request_regions(pdev, name);
+ if (err)
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 3b36797..db0b0c0 100644
--- a/drivers/devfreq/devfreq.c
@@ -36825,6 +38117,22 @@ index b70709b..1d8d02a 100644
.notifier_call = sh_dmae_nmi_handler,
/* Run before NMI debug handler and KGDB */
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 211021d..201d47f 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -474,9 +474,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+ */
+ int edac_device_alloc_index(void)
+ {
+- static atomic_t device_indexes = ATOMIC_INIT(0);
++ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
+
+- return atomic_inc_return(&device_indexes) - 1;
++ return atomic_inc_return_unchecked(&device_indexes) - 1;
+ }
+ EXPORT_SYMBOL_GPL(edac_device_alloc_index);
+
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index c4d700a..0b57abd 100644
--- a/drivers/edac/edac_mc_sysfs.c
@@ -36859,6 +38167,28 @@ index c4d700a..0b57abd 100644
err = device_create_file(&mci->dev,
&dev_attr_sdram_scrub_rate);
if (err) {
+diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
+index dd370f9..0281629 100644
+--- a/drivers/edac/edac_pci.c
++++ b/drivers/edac/edac_pci.c
+@@ -29,7 +29,7 @@
+
+ static DEFINE_MUTEX(edac_pci_ctls_mutex);
+ static LIST_HEAD(edac_pci_list);
+-static atomic_t pci_indexes = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
+
+ /*
+ * edac_pci_alloc_ctl_info
+@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
+ */
+ int edac_pci_alloc_index(void)
+ {
+- return atomic_inc_return(&pci_indexes) - 1;
++ return atomic_inc_return_unchecked(&pci_indexes) - 1;
+ }
+ EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
+
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index e8658e4..22746d6 100644
--- a/drivers/edac/edac_pci_sysfs.c
@@ -36969,9 +38299,21 @@ index 51b7e3a..aa8a3e8 100644
void amd_report_gart_errors(bool);
void amd_register_ecc_decoder(void (*f)(int, struct mce *));
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
-index 57ea7f4..789e3c3 100644
+index 57ea7f4..af06b76 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
+@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
+ const struct fw_card_driver *driver,
+ struct device *device)
+ {
+- static atomic_t index = ATOMIC_INIT(-1);
++ static atomic_unchecked_t index = ATOMIC_INIT(-1);
+
+- card->index = atomic_inc_return(&index);
++ card->index = atomic_inc_return_unchecked(&index);
+ card->driver = driver;
+ card->device = device;
+ card->current_tlabel = 0;
@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
void fw_core_remove_card(struct fw_card *card)
@@ -38614,10 +39956,112 @@ index 8c04943..4370ed9 100644
err = drm_debugfs_create_files(dc->debugfs_files,
ARRAY_SIZE(debugfs_files),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
-index 402f486..f862d7e 100644
+index 402f486..5340852 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
-@@ -2275,7 +2275,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
+@@ -63,6 +63,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
+ struct hid_report_enum *report_enum = device->report_enum + type;
+ struct hid_report *report;
+
++ if (id >= HID_MAX_IDS)
++ return NULL;
+ if (report_enum->report_id_hash[id])
+ return report_enum->report_id_hash[id];
+
+@@ -404,8 +406,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
+
+ case HID_GLOBAL_ITEM_TAG_REPORT_ID:
+ parser->global.report_id = item_udata(item);
+- if (parser->global.report_id == 0) {
+- hid_err(parser->device, "report_id 0 is invalid\n");
++ if (parser->global.report_id == 0 ||
++ parser->global.report_id >= HID_MAX_IDS) {
++ hid_err(parser->device, "report_id %u is invalid\n",
++ parser->global.report_id);
+ return -1;
+ }
+ return 0;
+@@ -575,7 +579,7 @@ static void hid_close_report(struct hid_device *device)
+ for (i = 0; i < HID_REPORT_TYPES; i++) {
+ struct hid_report_enum *report_enum = device->report_enum + i;
+
+- for (j = 0; j < 256; j++) {
++ for (j = 0; j < HID_MAX_IDS; j++) {
+ struct hid_report *report = report_enum->report_id_hash[j];
+ if (report)
+ hid_free_report(report);
+@@ -755,6 +759,56 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
+ }
+ EXPORT_SYMBOL_GPL(hid_parse_report);
+
++static const char * const hid_report_names[] = {
++ "HID_INPUT_REPORT",
++ "HID_OUTPUT_REPORT",
++ "HID_FEATURE_REPORT",
++};
++/**
++ * hid_validate_report - validate existing device report
++ *
++ * @device: hid device
++ * @type: which report type to examine
++ * @id: which report ID to examine (0 for first)
++ * @fields: expected number of fields
++ * @report_counts: expected number of values per field
++ *
++ * Validate the report details after parsing.
++ */
++struct hid_report *hid_validate_report(struct hid_device *hid,
++ unsigned int type, unsigned int id,
++ unsigned int fields,
++ unsigned int report_counts)
++{
++ struct hid_report *report;
++ unsigned int i;
++
++ if (type > HID_FEATURE_REPORT) {
++ hid_err(hid, "invalid HID report %u\n", type);
++ return NULL;
++ }
++
++ report = hid->report_enum[type].report_id_hash[id];
++ if (!report) {
++ hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
++ return NULL;
++ }
++ if (report->maxfield < fields) {
++ hid_err(hid, "not enough fields in %s %u\n",
++ hid_report_names[type], id);
++ return NULL;
++ }
++ for (i = 0; i < fields; i++) {
++ if (report->field[i]->report_count < report_counts) {
++ hid_err(hid, "not enough values in %s %u fields\n",
++ hid_report_names[type], id);
++ return NULL;
++ }
++ }
++ return report;
++}
++EXPORT_SYMBOL_GPL(hid_validate_report);
++
+ /**
+ * hid_open_report - open a driver-specific device report
+ *
+@@ -1152,7 +1206,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
+
+ int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
+ {
+- unsigned size = field->report_size;
++ unsigned size;
++
++ if (!field)
++ return -1;
++
++ size = field->report_size;
+
+ hid_dump_input(field->report->device, field->usage + offset, value);
+
+@@ -2275,7 +2334,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
int hid_add_device(struct hid_device *hdev)
{
@@ -38626,7 +40070,7 @@ index 402f486..f862d7e 100644
int ret;
if (WARN_ON(hdev->status & HID_STAT_ADDED))
-@@ -2309,7 +2309,7 @@ int hid_add_device(struct hid_device *hdev)
+@@ -2309,7 +2368,7 @@ int hid_add_device(struct hid_device *hdev)
/* XXX hack, any other cleaner solution after the driver core
* is converted to allow more than 20 bytes as the device name? */
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
@@ -38635,6 +40079,349 @@ index 402f486..f862d7e 100644
hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
+diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
+index 07837f5..b697ada 100644
+--- a/drivers/hid/hid-lenovo-tpkbd.c
++++ b/drivers/hid/hid-lenovo-tpkbd.c
+@@ -341,6 +341,11 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
+ char *name_mute, *name_micmute;
+ int ret;
+
++ /* Validate required reports. */
++ if (!hid_validate_report(hdev, HID_OUTPUT_REPORT, 4, 4, 1) ||
++ !hid_validate_report(hdev, HID_OUTPUT_REPORT, 3, 1, 2))
++ return -ENODEV;
++
+ if (sysfs_create_group(&hdev->dev.kobj,
+ &tpkbd_attr_group_pointer)) {
+ hid_warn(hdev, "Could not create sysfs group\n");
+diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
+index b3cd150..9805197 100644
+--- a/drivers/hid/hid-lg2ff.c
++++ b/drivers/hid/hid-lg2ff.c
+@@ -64,26 +64,13 @@ int lg2ff_init(struct hid_device *hid)
+ struct hid_report *report;
+ struct hid_input *hidinput = list_entry(hid->inputs.next,
+ struct hid_input, list);
+- struct list_head *report_list =
+- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+ int error;
+
+- if (list_empty(report_list)) {
+- hid_err(hid, "no output report found\n");
++ /* Check that the report looks ok */
++ report = hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7);
++ if (!report)
+ return -ENODEV;
+- }
+-
+- report = list_entry(report_list->next, struct hid_report, list);
+-
+- if (report->maxfield < 1) {
+- hid_err(hid, "output report is empty\n");
+- return -ENODEV;
+- }
+- if (report->field[0]->report_count < 7) {
+- hid_err(hid, "not enough values in the field\n");
+- return -ENODEV;
+- }
+
+ lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
+ if (!lg2ff)
+diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
+index e52f181..53ac79b 100644
+--- a/drivers/hid/hid-lg3ff.c
++++ b/drivers/hid/hid-lg3ff.c
+@@ -66,10 +66,11 @@ static int hid_lg3ff_play(struct input_dev *dev, void *data,
+ int x, y;
+
+ /*
+- * Maxusage should always be 63 (maximum fields)
+- * likely a better way to ensure this data is clean
++ * Available values in the field should always be 63, but we only use up to
++ * 35. Instead, clear the entire area, however big it is.
+ */
+- memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage);
++ memset(report->field[0]->value, 0,
++ sizeof(__s32) * report->field[0]->report_count);
+
+ switch (effect->type) {
+ case FF_CONSTANT:
+@@ -129,32 +130,14 @@ static const signed short ff3_joystick_ac[] = {
+ int lg3ff_init(struct hid_device *hid)
+ {
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+- struct hid_report *report;
+- struct hid_field *field;
+ const signed short *ff_bits = ff3_joystick_ac;
+ int error;
+ int i;
+
+- /* Find the report to use */
+- if (list_empty(report_list)) {
+- hid_err(hid, "No output report found\n");
+- return -1;
+- }
+-
+ /* Check that the report looks ok */
+- report = list_entry(report_list->next, struct hid_report, list);
+- if (!report) {
+- hid_err(hid, "NULL output report\n");
+- return -1;
+- }
+-
+- field = report->field[0];
+- if (!field) {
+- hid_err(hid, "NULL field\n");
+- return -1;
+- }
++ if (!hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 35))
++ return -ENODEV;
+
+ /* Assume single fixed device G940 */
+ for (i = 0; ff_bits[i] >= 0; i++)
+diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
+index 0ddae2a..8b89f0f 100644
+--- a/drivers/hid/hid-lg4ff.c
++++ b/drivers/hid/hid-lg4ff.c
+@@ -484,34 +484,16 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde
+ int lg4ff_init(struct hid_device *hid)
+ {
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+- struct hid_report *report;
+- struct hid_field *field;
+ struct lg4ff_device_entry *entry;
+ struct lg_drv_data *drv_data;
+ struct usb_device_descriptor *udesc;
+ int error, i, j;
+ __u16 bcdDevice, rev_maj, rev_min;
+
+- /* Find the report to use */
+- if (list_empty(report_list)) {
+- hid_err(hid, "No output report found\n");
+- return -1;
+- }
+-
+ /* Check that the report looks ok */
+- report = list_entry(report_list->next, struct hid_report, list);
+- if (!report) {
+- hid_err(hid, "NULL output report\n");
++ if (!hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7))
+ return -1;
+- }
+-
+- field = report->field[0];
+- if (!field) {
+- hid_err(hid, "NULL field\n");
+- return -1;
+- }
+
+ /* Check what wheel has been connected */
+ for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
+diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
+index d7ea8c8..a84fb40 100644
+--- a/drivers/hid/hid-lgff.c
++++ b/drivers/hid/hid-lgff.c
+@@ -128,27 +128,14 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
+ int lgff_init(struct hid_device* hid)
+ {
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+- struct hid_report *report;
+- struct hid_field *field;
+ const signed short *ff_bits = ff_joystick;
+ int error;
+ int i;
+
+- /* Find the report to use */
+- if (list_empty(report_list)) {
+- hid_err(hid, "No output report found\n");
+- return -1;
+- }
+-
+ /* Check that the report looks ok */
+- report = list_entry(report_list->next, struct hid_report, list);
+- field = report->field[0];
+- if (!field) {
+- hid_err(hid, "NULL field\n");
+- return -1;
+- }
++ if (!hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7))
++ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(devices); i++) {
+ if (dev->id.vendor == devices[i].idVendor &&
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 5207591a..6c9197f 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -421,7 +421,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
+ struct hid_report *report;
+ struct hid_report_enum *output_report_enum;
+ u8 *data = (u8 *)(&dj_report->device_index);
+- int i;
++ unsigned int i, length;
+
+ output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
+ report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
+@@ -431,7 +431,9 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
+ return -ENODEV;
+ }
+
+- for (i = 0; i < report->field[0]->report_count; i++)
++ length = min_t(size_t, sizeof(*dj_report) - 1,
++ report->field[0]->report_count);
++ for (i = 0; i < length; i++)
+ report->field[0]->value[i] = data[i];
+
+ hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
+@@ -738,6 +740,12 @@ static int logi_dj_probe(struct hid_device *hdev,
+ goto hid_parse_fail;
+ }
+
++ if (!hid_validate_report(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
++ 1, 3)) {
++ retval = -ENODEV;
++ goto hid_parse_fail;
++ }
++
+ /* Starts the usb device and connects to upper interfaces hiddev and
+ * hidraw */
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index d39a5ce..4892dfc 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -330,9 +330,18 @@ static void mt_feature_mapping(struct hid_device *hdev,
+ break;
+ }
+ }
++ /* Ignore if value index is out of bounds. */
++ if (td->inputmode_index < 0 ||
++ td->inputmode_index >= field->report_count) {
++ dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n");
++ td->inputmode = -1;
++ }
+
+ break;
+ case HID_DG_CONTACTMAX:
++ /* Ignore if value count is out of bounds. */
++ if (field->report_count < 1)
++ break;
+ td->maxcontact_report_id = field->report->id;
+ td->maxcontacts = field->value[0];
+ if (!td->maxcontacts &&
+@@ -743,15 +752,21 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
+ unsigned count;
+ int r, n;
+
++ if (report->maxfield == 0)
++ return;
++
+ /*
+ * Includes multi-packet support where subsequent
+ * packets are sent with zero contactcount.
+ */
+- if (td->cc_index >= 0) {
+- struct hid_field *field = report->field[td->cc_index];
+- int value = field->value[td->cc_value_index];
+- if (value)
+- td->num_expected = value;
++ if (td->cc_index >= 0 && td->cc_index < report->maxfield) {
++ field = report->field[td->cc_index];
++ if (td->cc_value_index >= 0 &&
++ td->cc_value_index < field->report_count) {
++ int value = field->value[td->cc_value_index];
++ if (value)
++ td->num_expected = value;
++ }
+ }
+
+ for (r = 0; r < report->maxfield; r++) {
+diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
+index ef95102..5482156 100644
+--- a/drivers/hid/hid-ntrig.c
++++ b/drivers/hid/hid-ntrig.c
+@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
+ struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
+ report_id_hash[0x0d];
+
+- if (!report)
++ if (!report || report->maxfield < 1 ||
++ report->field[0]->report_count < 1)
+ return -EINVAL;
+
+ hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
+diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
+index b48092d..72bba1e 100644
+--- a/drivers/hid/hid-picolcd_core.c
++++ b/drivers/hid/hid-picolcd_core.c
+@@ -290,7 +290,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
+ buf += 10;
+ cnt -= 10;
+ }
+- if (!report)
++ if (!report || report->maxfield < 1)
+ return -EINVAL;
+
+ while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
+diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
+index d29112f..2dcd7d9 100644
+--- a/drivers/hid/hid-pl.c
++++ b/drivers/hid/hid-pl.c
+@@ -132,8 +132,14 @@ static int plff_init(struct hid_device *hid)
+ strong = &report->field[0]->value[2];
+ weak = &report->field[0]->value[3];
+ debug("detected single-field device");
+- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
+- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
++ } else if (report->field[0]->maxusage == 1 &&
++ report->field[0]->usage[0].hid ==
++ (HID_UP_LED | 0x43) &&
++ report->maxfield >= 4 &&
++ report->field[0]->report_count >= 1 &&
++ report->field[1]->report_count >= 1 &&
++ report->field[2]->report_count >= 1 &&
++ report->field[3]->report_count >= 1) {
+ report->field[0]->value[0] = 0x00;
+ report->field[1]->value[0] = 0x00;
+ strong = &report->field[2]->value[0];
+diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
+index ca749810..aa34755 100644
+--- a/drivers/hid/hid-sensor-hub.c
++++ b/drivers/hid/hid-sensor-hub.c
+@@ -221,7 +221,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
+
+ mutex_lock(&data->mutex);
+ report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
+- if (!report || (field_index >= report->maxfield)) {
++ if (!report || (field_index >= report->maxfield) ||
++ report->field[field_index]->report_count < 1) {
+ ret = -EINVAL;
+ goto done_proc;
+ }
+diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
+index d164911..ef42e86 100644
+--- a/drivers/hid/hid-steelseries.c
++++ b/drivers/hid/hid-steelseries.c
+@@ -249,6 +249,11 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
+ goto err_free;
+ }
+
++ if (!hid_validate_report(hdev, HID_OUTPUT_REPORT, 0, 1, 16)) {
++ ret = -ENODEV;
++ goto err_free;
++ }
++
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
index 90124ff..3761764 100644
--- a/drivers/hid/hid-wiimote-debug.c
@@ -38648,6 +40435,66 @@ index 90124ff..3761764 100644
return -EFAULT;
*off += size;
+diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
+index 6ec28a3..b124991 100644
+--- a/drivers/hid/hid-zpff.c
++++ b/drivers/hid/hid-zpff.c
+@@ -68,22 +68,12 @@ static int zpff_init(struct hid_device *hid)
+ struct hid_report *report;
+ struct hid_input *hidinput = list_entry(hid->inputs.next,
+ struct hid_input, list);
+- struct list_head *report_list =
+- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+ int error;
+
+- if (list_empty(report_list)) {
+- hid_err(hid, "no output report found\n");
++ report = hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 4, 1);
++ if (!report)
+ return -ENODEV;
+- }
+-
+- report = list_entry(report_list->next, struct hid_report, list);
+-
+- if (report->maxfield < 4) {
+- hid_err(hid, "not enough fields in report\n");
+- return -ENODEV;
+- }
+
+ zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
+ if (!zpff)
+diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
+index fc307e0..2b255e8 100644
+--- a/drivers/hid/uhid.c
++++ b/drivers/hid/uhid.c
+@@ -47,7 +47,7 @@ struct uhid_device {
+ struct mutex report_lock;
+ wait_queue_head_t report_wait;
+ atomic_t report_done;
+- atomic_t report_id;
++ atomic_unchecked_t report_id;
+ struct uhid_event report_buf;
+ };
+
+@@ -187,7 +187,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ ev->type = UHID_FEATURE;
+- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
++ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
+ ev->u.feature.rnum = rnum;
+ ev->u.feature.rtype = report_type;
+
+@@ -471,7 +471,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
+ spin_lock_irqsave(&uhid->qlock, flags);
+
+ /* id for old report; drop it silently */
+- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
++ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
+ goto unlock;
+ if (atomic_read(&uhid->report_done))
+ goto unlock;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 0b122f8..b1d8160 100644
--- a/drivers/hv/channel.c
@@ -38676,6 +40523,91 @@ index ae49237..380d4c9 100644
__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
"=a"(hv_status_lo) : "d" (control_hi),
+diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
+index deb5c25..ed2d4fd 100644
+--- a/drivers/hv/hv_balloon.c
++++ b/drivers/hv/hv_balloon.c
+@@ -464,7 +464,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
+
+ module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+ MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
+-static atomic_t trans_id = ATOMIC_INIT(0);
++static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
+
+ static int dm_ring_size = (5 * PAGE_SIZE);
+
+@@ -825,7 +825,7 @@ static void hot_add_req(struct work_struct *dummy)
+ memset(&resp, 0, sizeof(struct dm_hot_add_response));
+ resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
+ resp.hdr.size = sizeof(struct dm_hot_add_response);
+- resp.hdr.trans_id = atomic_inc_return(&trans_id);
++ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+
+ #ifdef CONFIG_MEMORY_HOTPLUG
+ pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
+@@ -960,7 +960,7 @@ static void post_status(struct hv_dynmem_device *dm)
+ memset(&status, 0, sizeof(struct dm_status));
+ status.hdr.type = DM_STATUS_REPORT;
+ status.hdr.size = sizeof(struct dm_status);
+- status.hdr.trans_id = atomic_inc_return(&trans_id);
++ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+
+ /*
+ * The host expects the guest to report free memory.
+@@ -980,7 +980,7 @@ static void post_status(struct hv_dynmem_device *dm)
+ * send the status. This can happen if we were interrupted
+ * after we picked our transaction ID.
+ */
+- if (status.hdr.trans_id != atomic_read(&trans_id))
++ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
+ return;
+
+ vmbus_sendpacket(dm->dev->channel, &status,
+@@ -1081,7 +1081,7 @@ static void balloon_up(struct work_struct *dummy)
+ bl_resp = (struct dm_balloon_response *)send_buffer;
+ memset(send_buffer, 0, PAGE_SIZE);
+ bl_resp->hdr.type = DM_BALLOON_RESPONSE;
+- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
++ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+ bl_resp->hdr.size = sizeof(struct dm_balloon_response);
+ bl_resp->more_pages = 1;
+
+@@ -1152,7 +1152,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
+
+ memset(&resp, 0, sizeof(struct dm_unballoon_response));
+ resp.hdr.type = DM_UNBALLOON_RESPONSE;
+- resp.hdr.trans_id = atomic_inc_return(&trans_id);
++ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+ resp.hdr.size = sizeof(struct dm_unballoon_response);
+
+ vmbus_sendpacket(dm_device.dev->channel, &resp,
+@@ -1215,7 +1215,7 @@ static void version_resp(struct hv_dynmem_device *dm,
+ memset(&version_req, 0, sizeof(struct dm_version_request));
+ version_req.hdr.type = DM_VERSION_REQUEST;
+ version_req.hdr.size = sizeof(struct dm_version_request);
+- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
++ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+ version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
+ version_req.is_last_attempt = 1;
+
+@@ -1385,7 +1385,7 @@ static int balloon_probe(struct hv_device *dev,
+ memset(&version_req, 0, sizeof(struct dm_version_request));
+ version_req.hdr.type = DM_VERSION_REQUEST;
+ version_req.hdr.size = sizeof(struct dm_version_request);
+- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
++ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+ version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
+ version_req.is_last_attempt = 0;
+
+@@ -1416,7 +1416,7 @@ static int balloon_probe(struct hv_device *dev,
+ memset(&cap_msg, 0, sizeof(struct dm_capabilities));
+ cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
+ cap_msg.hdr.size = sizeof(struct dm_capabilities);
+- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
++ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+
+ cap_msg.caps.cap_bits.balloon = 1;
+ cap_msg.caps.cap_bits.hot_add = 1;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 12f2f9e..679603c 100644
--- a/drivers/hv/hyperv_vmbus.h
@@ -39277,6 +41209,32 @@ index 1f95bba..9530f87 100644
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
sdata, wqe->wr.wr.atomic.swap);
goto send_comp;
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index 4d599ce..697b17f 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
+
+ __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
+ {
+- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
++ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
+ cpu_to_be64(0xff00000000000000LL);
+ }
+
+diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
+index f61ec26..ebf72cf 100644
+--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
++++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
+@@ -398,7 +398,7 @@ struct mlx4_ib_demux_ctx {
+ struct list_head mcg_mgid0_list;
+ struct workqueue_struct *mcg_wq;
+ struct mlx4_ib_demux_pv_ctx **tun;
+- atomic_t tid;
++ atomic_unchecked_t tid;
+ int flushing; /* flushing the work queue */
+ };
+
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 9d3e5c1..d9afe4a 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -39805,6 +41763,28 @@ index fa061d4..4a6957c 100644
snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
led->xpad = xpad;
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index e204f26..8459f15 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -1621,7 +1621,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
+
+ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
+ {
+- static atomic_t device_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
+
+ const struct ims_pcu_device_info *info;
+ u8 device_id;
+@@ -1653,7 +1653,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
+ }
+
+ /* Device appears to be operable, complete initialization */
+- pcu->device_no = atomic_inc_return(&device_no) - 1;
++ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
+
+ error = ims_pcu_setup_backlight(pcu);
+ if (error)
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index 2f0b39d..7370f13 100644
--- a/drivers/input/mouse/psmouse.h
@@ -39853,6 +41833,28 @@ index 25fc597..558bf3b3 100644
serio->dev.bus = &serio_bus;
serio->dev.release = serio_release_port;
serio->dev.groups = serio_device_attr_groups;
+diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
+index 59df2e7..8f1cafb 100644
+--- a/drivers/input/serio/serio_raw.c
++++ b/drivers/input/serio/serio_raw.c
+@@ -293,7 +293,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
+
+ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
+ {
+- static atomic_t serio_raw_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
+ struct serio_raw *serio_raw;
+ int err;
+
+@@ -304,7 +304,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
+ }
+
+ snprintf(serio_raw->name, sizeof(serio_raw->name),
+- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
++ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
+ kref_init(&serio_raw->kref);
+ INIT_LIST_HEAD(&serio_raw->client_list);
+ init_waitqueue_head(&serio_raw->wait);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d8f98b1..f62a640 100644
--- a/drivers/iommu/iommu.c
@@ -39987,6 +41989,19 @@ index 600c79b..3752bab 100644
tty_port_tty_set(&cs->port, NULL);
mutex_unlock(&cs->mutex);
+diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
+index d0a41cb..f0cdb8c 100644
+--- a/drivers/isdn/gigaset/usb-gigaset.c
++++ b/drivers/isdn/gigaset/usb-gigaset.c
+@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
+ gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
+ memcpy(cs->hw.usb->bchars, buf, 6);
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
+- 0, 0, &buf, 6, 2000);
++ 0, 0, buf, 6, 2000);
+ }
+
+ static void gigaset_freebcshw(struct bc_state *bcs)
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 4d9b195..455075c 100644
--- a/drivers/isdn/hardware/avm/b1.c
@@ -40009,6 +42024,19 @@ index 4d9b195..455075c 100644
return -EFAULT;
} else {
memcpy(buf, dp, left);
+diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
+index 9bb12ba..d4262f7 100644
+--- a/drivers/isdn/i4l/isdn_common.c
++++ b/drivers/isdn/i4l/isdn_common.c
+@@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
+ } else
+ return -EINVAL;
+ case IIOCDBGVAR:
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
+ if (arg) {
+ if (copy_to_user(argp, &dev, sizeof(ulong)))
+ return -EFAULT;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 3c5f249..5fac4d0 100644
--- a/drivers/isdn/i4l/isdn_tty.c
@@ -40277,6 +42305,19 @@ index 0003992..854bbce 100644
closure_set_ip(cl);
cl->fn = fn;
cl->wq = wq;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index b4713ce..b30139b 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1603,7 +1603,7 @@ err_unlock_gc:
+ err:
+ closure_sync(&op.cl);
+ /* XXX: test this, it's broken */
+- bch_cache_set_error(c, err);
++ bch_cache_set_error(c, "%s", err);
+ }
+
+ static bool can_attach_cache(struct cache *ca, struct cache_set *c)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 5a2c754..0fa55db 100644
--- a/drivers/md/bitmap.c
@@ -40831,6 +42872,19 @@ index c7a9be1..683f6f8 100644
module_param_array(video_nr, int, NULL, 0444);
module_param_array(vbi_nr, int, NULL, 0444);
+diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
+index 07b8460..e6d7265 100644
+--- a/drivers/media/pci/ivtv/ivtv-driver.c
++++ b/drivers/media/pci/ivtv/ivtv-driver.c
+@@ -84,7 +84,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
+ MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
+
+ /* ivtv instance counter */
+-static atomic_t ivtv_instance = ATOMIC_INIT(0);
++static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
+
+ /* Parameter declarations */
+ static int cardtype[IVTV_MAX_CARDS];
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index d338b19..aae4f9e 100644
--- a/drivers/media/platform/omap/omap_vout.c
@@ -41041,6 +43095,80 @@ index 545c04c..a14bded 100644
i = -EFAULT;
unlock:
mutex_unlock(&dev->lock);
+diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
+index bd4d3a7..ffc0b9d 100644
+--- a/drivers/media/radio/radio-maxiradio.c
++++ b/drivers/media/radio/radio-maxiradio.c
+@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
+ /* TEA5757 pin mappings */
+ static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
+
+-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
++static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
+
+ #define PCI_VENDOR_ID_GUILLEMOT 0x5046
+ #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
+diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
+index 8fa18ab..caee70f 100644
+--- a/drivers/media/radio/radio-shark.c
++++ b/drivers/media/radio/radio-shark.c
+@@ -79,7 +79,7 @@ struct shark_device {
+ u32 last_val;
+ };
+
+-static atomic_t shark_instance = ATOMIC_INIT(0);
++static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
+
+ static void shark_write_val(struct snd_tea575x *tea, u32 val)
+ {
+diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
+index 9fb6697..f167415 100644
+--- a/drivers/media/radio/radio-shark2.c
++++ b/drivers/media/radio/radio-shark2.c
+@@ -74,7 +74,7 @@ struct shark_device {
+ u8 *transfer_buffer;
+ };
+
+-static atomic_t shark_instance = ATOMIC_INIT(0);
++static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
+
+ static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
+ {
+diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
+index 9dc8baf..796d52f 100644
+--- a/drivers/media/radio/radio-si476x.c
++++ b/drivers/media/radio/radio-si476x.c
+@@ -1456,7 +1456,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
+ struct si476x_radio *radio;
+ struct v4l2_ctrl *ctrl;
+
+- static atomic_t instance = ATOMIC_INIT(0);
++ static atomic_unchecked_t instance = ATOMIC_INIT(0);
+
+ radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
+ if (!radio)
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 1cf382a..c22998c 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -1030,7 +1030,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
+ int rc_register_device(struct rc_dev *dev)
+ {
+ static bool raw_init = false; /* raw decoders loaded? */
+- static atomic_t devno = ATOMIC_INIT(0);
++ static atomic_unchecked_t devno = ATOMIC_INIT(0);
+ struct rc_map *rc_map;
+ const char *path;
+ int rc;
+@@ -1061,7 +1061,7 @@ int rc_register_device(struct rc_dev *dev)
+ */
+ mutex_lock(&dev->lock);
+
+- dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
++ dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
+ dev_set_name(&dev->dev, "rc%ld", dev->devno);
+ dev_set_drvdata(&dev->dev, dev);
+ rc = device_add(&dev->dev);
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 3940bb0..fb3952a 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
@@ -41098,6 +43226,22 @@ index f129551..ecf6514 100644
return -EFAULT;
return 0;
}
+diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
+index 8ed5da2..47fee46 100644
+--- a/drivers/media/v4l2-core/v4l2-device.c
++++ b/drivers/media/v4l2-core/v4l2-device.c
+@@ -74,9 +74,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
+ EXPORT_SYMBOL_GPL(v4l2_device_put);
+
+ int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
+- atomic_t *instance)
++ atomic_unchecked_t *instance)
+ {
+- int num = atomic_inc_return(instance) - 1;
++ int num = atomic_inc_return_unchecked(instance) - 1;
+ int len = strlen(basename);
+
+ if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 7658586..1079260 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -41951,19 +44095,6 @@ index f975696..4597e21 100644
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
-diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
-index 25723d8..925ab8e 100644
---- a/drivers/net/can/usb/peak_usb/pcan_usb.c
-+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
-@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
- if ((mc->ptr + rec_len) > mc->end)
- goto decode_failed;
-
-- memcpy(cf->data, mc->ptr, rec_len);
-+ memcpy(cf->data, mc->ptr, cf->can_dlc);
- mc->ptr += rec_len;
- }
-
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e1d2643..7f4133b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
@@ -42646,6 +44777,28 @@ index cba1d46..f703766 100644
result =
hso_start_serial_device(serial_table[i], GFP_NOIO);
hso_kick_transmit(dev2ser(serial_table[i]));
+diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
+index a79e9d3..78cd4fa 100644
+--- a/drivers/net/usb/sierra_net.c
++++ b/drivers/net/usb/sierra_net.c
+@@ -52,7 +52,7 @@ static const char driver_name[] = "sierra_net";
+ /* atomic counter partially included in MAC address to make sure 2 devices
+ * do not end up with the same MAC - concept breaks in case of > 255 ifaces
+ */
+-static atomic_t iface_counter = ATOMIC_INIT(0);
++static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
+
+ /*
+ * SYNC Timer Delay definition used to set the expiry time
+@@ -698,7 +698,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+ dev->net->netdev_ops = &sierra_net_device_ops;
+
+ /* change MAC addr to include, ifacenum, and to be unique */
+- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
++ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
+ dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
+
+ /* we will have to manufacture ethernet headers, prepare template */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 57325f3..36b181f 100644
--- a/drivers/net/vxlan.c
@@ -43050,8 +45203,29 @@ index d532948..e0d8bb1 100644
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
+diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
+index a8afc7b..de058b2 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/main.c
++++ b/drivers/net/wireless/iwlwifi/dvm/main.c
+@@ -1189,7 +1189,7 @@ static void iwl_option_config(struct iwl_priv *priv)
+ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
+ {
+ struct iwl_nvm_data *data = priv->nvm_data;
+- char *debug_msg;
++ static const char debug_msg[] = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
+
+ if (data->sku_cap_11n_enable &&
+ !priv->cfg->ht_params) {
+@@ -1203,7 +1203,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
+ return -EINVAL;
+ }
+
+- debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
+ IWL_DEBUG_INFO(priv, debug_msg,
+ data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
+ data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
-index 50ba0a4..29424e7 100644
+index aeb70e1..d7b5bb5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1329,7 +1329,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
@@ -43546,6 +45720,26 @@ index 7d72c5e..edce02c 100644
char name[SLOT_NAME_SIZE];
int retval = -ENOMEM;
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 5127f3f..b225573 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -773,14 +773,12 @@ static void pcie_shutdown_notification(struct controller *ctrl)
+ static int pcie_init_slot(struct controller *ctrl)
+ {
+ struct slot *slot;
+- char name[32];
+
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ return -ENOMEM;
+
+- snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl));
+- slot->wq = alloc_workqueue(name, 0, 0);
++ slot->wq = alloc_workqueue("pciehp-%u", 0, 0, PSN(ctrl));
+ if (!slot->wq)
+ goto abort;
+
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 5b4a9d9..cd5ac1f 100644
--- a/drivers/pci/pci-sysfs.c
@@ -43824,6 +46018,19 @@ index 54d31c0..3f896d3 100644
/*
* Polling driver
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index e4ac38a..b13344c 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -743,7 +743,7 @@ static int wmi_create_device(const struct guid_block *gblock,
+ wblock->dev.class = &wmi_class;
+
+ wmi_gtoa(gblock->guid, guid_string);
+- dev_set_name(&wblock->dev, guid_string);
++ dev_set_name(&wblock->dev, "%s", guid_string);
+
+ dev_set_drvdata(&wblock->dev, wblock);
+
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 769d265..a3a05ca 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
@@ -43999,6 +46206,28 @@ index 29178f7..c65f324 100644
for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
__power_supply_attrs[i] = &power_supply_attrs[i].attr;
}
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 815d6df..811633a 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -3529,7 +3529,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ {
+ const struct regulation_constraints *constraints = NULL;
+ const struct regulator_init_data *init_data;
+- static atomic_t regulator_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
+ struct regulator_dev *rdev;
+ struct device *dev;
+ int ret, i;
+@@ -3599,7 +3599,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ rdev->dev.of_node = config->of_node;
+ rdev->dev.parent = dev;
+ dev_set_name(&rdev->dev, "regulator.%d",
+- atomic_inc_return(&regulator_no) - 1);
++ atomic_inc_return_unchecked(&regulator_no) - 1);
+ ret = device_register(&rdev->dev);
+ if (ret != 0) {
+ put_device(&rdev->dev);
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index d428ef9..fdc0357 100644
--- a/drivers/regulator/max8660.c
@@ -44153,6 +46382,50 @@ index 23a90e7..9cf04ee 100644
/*
* Queue element to wait for room in request queue. FIFO order is
+diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
+index 8c05ae01..b2cf224 100644
+--- a/drivers/scsi/fcoe/fcoe_sysfs.c
++++ b/drivers/scsi/fcoe/fcoe_sysfs.c
+@@ -33,8 +33,8 @@
+ */
+ #include "libfcoe.h"
+
+-static atomic_t ctlr_num;
+-static atomic_t fcf_num;
++static atomic_unchecked_t ctlr_num;
++static atomic_unchecked_t fcf_num;
+
+ /*
+ * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
+@@ -681,7 +681,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+ if (!ctlr)
+ goto out;
+
+- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
++ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
+ ctlr->f = f;
+ ctlr->mode = FIP_CONN_TYPE_FABRIC;
+ INIT_LIST_HEAD(&ctlr->fcfs);
+@@ -898,7 +898,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
+ fcf->dev.parent = &ctlr->dev;
+ fcf->dev.bus = &fcoe_bus_type;
+ fcf->dev.type = &fcoe_fcf_device_type;
+- fcf->id = atomic_inc_return(&fcf_num) - 1;
++ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
+ fcf->state = FCOE_FCF_STATE_UNKNOWN;
+
+ fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
+@@ -934,8 +934,8 @@ int __init fcoe_sysfs_setup(void)
+ {
+ int error;
+
+- atomic_set(&ctlr_num, 0);
+- atomic_set(&fcf_num, 0);
++ atomic_set_unchecked(&ctlr_num, 0);
++ atomic_set_unchecked(&fcf_num, 0);
+
+ error = bus_register(&fcoe_bus_type);
+ if (error)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index df0c3c7..b00e1d0 100644
--- a/drivers/scsi/hosts.c
@@ -45030,7 +47303,7 @@ index f379c7f..e8fc69c 100644
transport_setup_device(&rport->dev);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index 610417e..1544fa9 100644
+index 610417e..167c46c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2928,7 +2928,7 @@ static int sd_probe(struct device *dev)
@@ -45042,6 +47315,15 @@ index 610417e..1544fa9 100644
if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)
+@@ -2941,7 +2941,7 @@ static int sd_probe(struct device *dev)
+ device_initialize(&sdkp->dev);
+ sdkp->dev.parent = dev;
+ sdkp->dev.class = &sd_disk_class;
+- dev_set_name(&sdkp->dev, dev_name(dev));
++ dev_set_name(&sdkp->dev, "%s", dev_name(dev));
+
+ if (device_add(&sdkp->dev))
+ goto out_free_index;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index df5e961..df6b97f 100644
--- a/drivers/scsi/sg.c
@@ -45068,6 +47350,37 @@ index 32b7bb1..2f1c4bd 100644
static u8 *buf;
+diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
+index ec9e2ae..cd15d67 100644
+--- a/drivers/staging/android/timed_output.c
++++ b/drivers/staging/android/timed_output.c
+@@ -25,7 +25,7 @@
+ #include "timed_output.h"
+
+ static struct class *timed_output_class;
+-static atomic_t device_count;
++static atomic_unchecked_t device_count;
+
+ static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+@@ -59,7 +59,7 @@ static int create_timed_output_class(void)
+ timed_output_class = class_create(THIS_MODULE, "timed_output");
+ if (IS_ERR(timed_output_class))
+ return PTR_ERR(timed_output_class);
+- atomic_set(&device_count, 0);
++ atomic_set_unchecked(&device_count, 0);
+ }
+
+ return 0;
+@@ -76,7 +76,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
+ if (ret < 0)
+ return ret;
+
+- tdev->index = atomic_inc_return(&device_count);
++ tdev->index = atomic_inc_return_unchecked(&device_count);
+ tdev->dev = device_create(timed_output_class, NULL,
+ MKDEV(0, tdev->index), NULL, tdev->name);
+ if (IS_ERR(tdev->dev))
diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
index 3675020..e80d92c 100644
--- a/drivers/staging/media/solo6x10/solo6x10-core.c
@@ -45081,6 +47394,32 @@ index 3675020..e80d92c 100644
struct device *dev = &solo_dev->dev;
const char *driver;
int i;
+diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
+index 3335941..2b26186 100644
+--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
++++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
+@@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
+
+ /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
+ if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
+- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
++ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
+ if (p2m_id < 0)
+ p2m_id = -p2m_id;
+ }
+diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
+index 6f91d2e..3f011d2 100644
+--- a/drivers/staging/media/solo6x10/solo6x10.h
++++ b/drivers/staging/media/solo6x10/solo6x10.h
+@@ -238,7 +238,7 @@ struct solo_dev {
+
+ /* P2M DMA Engine */
+ struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
+- atomic_t p2m_count;
++ atomic_unchecked_t p2m_count;
+ int p2m_jiffies;
+ unsigned int p2m_timeouts;
+
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 34afc16..ffe44dd 100644
--- a/drivers/staging/octeon/ethernet-rx.c
@@ -45276,48 +47615,50 @@ index c699a30..b90a5fd 100644
pDevice->apdev->netdev_ops = &apdev_netdev_ops;
pDevice->apdev->type = ARPHRD_IEEE80211;
-diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
-index d7e51e4..d07eaab 100644
---- a/drivers/staging/zcache/tmem.c
-+++ b/drivers/staging/zcache/tmem.c
-@@ -51,7 +51,7 @@
- * A tmem host implementation must use this function to register callbacks
- * for memory allocation.
- */
--static struct tmem_hostops tmem_hostops;
-+static tmem_hostops_no_const tmem_hostops;
-
- static void tmem_objnode_tree_init(void);
-
-@@ -65,7 +65,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
- * A tmem host implementation must use this function to register
- * callbacks for a page-accessible memory (PAM) implementation.
- */
--static struct tmem_pamops tmem_pamops;
-+static tmem_pamops_no_const tmem_pamops;
-
- void tmem_register_pamops(struct tmem_pamops *m)
- {
diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
-index d128ce2..a43980c 100644
+index d128ce2..fc1f9a1 100644
--- a/drivers/staging/zcache/tmem.h
+++ b/drivers/staging/zcache/tmem.h
-@@ -226,6 +226,7 @@ struct tmem_pamops {
+@@ -225,7 +225,7 @@ struct tmem_pamops {
+ bool (*is_remote)(void *);
int (*replace_in_obj)(void *, struct tmem_obj *);
#endif
- };
-+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
+-};
++} __no_const;
extern void tmem_register_pamops(struct tmem_pamops *m);
/* memory allocation methods provided by the host implementation */
-@@ -235,6 +236,7 @@ struct tmem_hostops {
+@@ -234,7 +234,7 @@ struct tmem_hostops {
+ void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
- };
-+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
+-};
++} __no_const;
extern void tmem_register_hostops(struct tmem_hostops *m);
/* core tmem accessor functions */
+diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
+index d3536f5..a0c2ce9 100644
+--- a/drivers/target/sbp/sbp_target.c
++++ b/drivers/target/sbp/sbp_target.c
+@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
+
+ #define SESSION_MAINTENANCE_INTERVAL HZ
+
+-static atomic_t login_id = ATOMIC_INIT(0);
++static atomic_unchecked_t login_id = ATOMIC_INIT(0);
+
+ static void session_maintenance_work(struct work_struct *);
+ static int sbp_run_transaction(struct fw_card *, int, int, int, int,
+@@ -444,7 +444,7 @@ static void sbp_management_request_login(
+ login->lun = se_lun;
+ login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
+ login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
+- login->login_id = atomic_inc_return(&login_id);
++ login->login_id = atomic_inc_return_unchecked(&login_id);
+
+ login->tgt_agt = sbp_target_agent_register(login);
+ if (IS_ERR(login->tgt_agt)) {
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 4630481..c26782a 100644
--- a/drivers/target/target_core_device.c
@@ -45525,6 +47866,95 @@ index 81e939e..95ead10 100644
return 0;
return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
+diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
+index 4190199..48f2920 100644
+--- a/drivers/tty/hvc/hvsi.c
++++ b/drivers/tty/hvc/hvsi.c
+@@ -85,7 +85,7 @@ struct hvsi_struct {
+ int n_outbuf;
+ uint32_t vtermno;
+ uint32_t virq;
+- atomic_t seqno; /* HVSI packet sequence number */
++ atomic_unchecked_t seqno; /* HVSI packet sequence number */
+ uint16_t mctrl;
+ uint8_t state; /* HVSI protocol state */
+ uint8_t flags;
+@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
+
+ packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
+ packet.hdr.len = sizeof(struct hvsi_query_response);
+- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
+ packet.verb = VSV_SEND_VERSION_NUMBER;
+ packet.u.version = HVSI_VERSION;
+ packet.query_seqno = query_seqno+1;
+@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
+
+ packet.hdr.type = VS_QUERY_PACKET_HEADER;
+ packet.hdr.len = sizeof(struct hvsi_query);
+- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
+ packet.verb = verb;
+
+ pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
+@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
+ int wrote;
+
+ packet.hdr.type = VS_CONTROL_PACKET_HEADER,
+- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
+ packet.hdr.len = sizeof(struct hvsi_control);
+ packet.verb = VSV_SET_MODEM_CTL;
+ packet.mask = HVSI_TSDTR;
+@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
+ BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
+
+ packet.hdr.type = VS_DATA_PACKET_HEADER;
+- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
+ packet.hdr.len = count + sizeof(struct hvsi_header);
+ memcpy(&packet.data, buf, count);
+
+@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
+ struct hvsi_control packet __ALIGNED__;
+
+ packet.hdr.type = VS_CONTROL_PACKET_HEADER;
+- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
+ packet.hdr.len = 6;
+ packet.verb = VSV_CLOSE_PROTOCOL;
+
+diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
+index 3396eb9..6d3d540 100644
+--- a/drivers/tty/hvc/hvsi_lib.c
++++ b/drivers/tty/hvc/hvsi_lib.c
+@@ -9,7 +9,7 @@
+
+ static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
+ {
+- packet->seqno = atomic_inc_return(&pv->seqno);
++ packet->seqno = atomic_inc_return_unchecked(&pv->seqno);
+
+ /* Assumes that always succeeds, works in practice */
+ return pv->put_chars(pv->termno, (char *)packet, packet->len);
+@@ -21,7 +21,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
+
+ /* Reset state */
+ pv->established = 0;
+- atomic_set(&pv->seqno, 0);
++ atomic_set_unchecked(&pv->seqno, 0);
+
+ pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
+
+@@ -265,7 +265,7 @@ int hvsilib_read_mctrl(struct hvsi_priv *pv)
+ pv->mctrl_update = 0;
+ q.hdr.type = VS_QUERY_PACKET_HEADER;
+ q.hdr.len = sizeof(struct hvsi_query);
+- q.hdr.seqno = atomic_inc_return(&pv->seqno);
++ q.hdr.seqno = atomic_inc_return_unchecked(&pv->seqno);
+ q.verb = VSV_SEND_MODEM_CTL_STATUS;
+ rc = hvsi_send_packet(pv, &q.hdr);
+ if (rc <= 0) {
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 8fd72ff..34a0bed 100644
--- a/drivers/tty/ipwireless/tty.c
@@ -45737,6 +48167,37 @@ index 354564e..fe50d9a 100644
atomic_dec(&rp_num_ports_open);
clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
spin_unlock_irqrestore(&info->port.lock, flags);
+diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
+index e2520ab..034e20b 100644
+--- a/drivers/tty/serial/ioc4_serial.c
++++ b/drivers/tty/serial/ioc4_serial.c
+@@ -437,7 +437,7 @@ struct ioc4_soft {
+ } is_intr_info[MAX_IOC4_INTR_ENTS];
+
+ /* Number of entries active in the above array */
+- atomic_t is_num_intrs;
++ atomic_unchecked_t is_num_intrs;
+ } is_intr_type[IOC4_NUM_INTR_TYPES];
+
+ /* is_ir_lock must be held while
+@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
+ BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
+ || (type == IOC4_OTHER_INTR_TYPE)));
+
+- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
++ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
+ BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
+
+ /* Save off the lower level interrupt handler */
+@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
+
+ soft = arg;
+ for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
+- num_intrs = (int)atomic_read(
++ num_intrs = (int)atomic_read_unchecked(
+ &soft->is_intr_type[intr_type].is_num_intrs);
+
+ this_mir = this_ir = pending_intrs(soft, intr_type);
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 1002054..dd644a8 100644
--- a/drivers/tty/serial/kgdboc.c
@@ -45843,6 +48304,28 @@ index 1002054..dd644a8 100644
#ifdef CONFIG_KGDB_SERIAL_CONSOLE
/* This is only available if kgdboc is a built in for early debugging */
static int __init kgdboc_early_init(char *opt)
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index b11e997..6d25a3b 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -857,7 +857,7 @@ static struct uart_driver msm_uart_driver = {
+ .cons = MSM_CONSOLE,
+ };
+
+-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
++static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
+
+ static int __init msm_serial_probe(struct platform_device *pdev)
+ {
+@@ -867,7 +867,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
+ int irq;
+
+ if (pdev->id == -1)
+- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
++ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
+
+ if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
+ return -ENXIO;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 0c8a9fa..234a95f 100644
--- a/drivers/tty/serial/samsung.c
@@ -46751,6 +49234,29 @@ index d53547d..6a22d02 100644
if (atomic_read(&urb->reject))
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index da2905a..834a569 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -27,6 +27,7 @@
+ #include <linux/freezer.h>
+ #include <linux/random.h>
+ #include <linux/pm_qos.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/byteorder.h>
+@@ -4424,6 +4425,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
+ goto done;
+ return;
+ }
++
++ if (gr_handle_new_usb())
++ goto done;
++
+ if (hub_is_superspeed(hub->hdev))
+ unit_load = 150;
+ else
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 444d30e..f15c850 100644
--- a/drivers/usb/core/message.c
@@ -46790,6 +49296,19 @@ index b10da72..43aa0b2 100644
INIT_LIST_HEAD(&dev->ep0.urb_list);
dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index f77083f..f3e2e34 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -550,8 +550,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
+ if (!usb_endpoint_xfer_isoc(desc))
+ return 0;
+
+- memset(&trb_link, 0, sizeof(trb_link));
+-
+ /* Link TRB for ISOC. The HWO bit is never reset */
+ trb_st_hw = &dep->trb_pool[0];
+
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 5e29dde..eca992f 100644
--- a/drivers/usb/early/ehci-dbgp.c
@@ -46916,6 +49435,28 @@ index b369292..9f3ba40 100644
gs_buf_free(&port->port_write_buf);
gs_free_requests(gser->out, &port->read_pool, NULL);
gs_free_requests(gser->out, &port->read_queue, NULL);
+diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
+index ba6a5d6..f88f7f3 100644
+--- a/drivers/usb/misc/appledisplay.c
++++ b/drivers/usb/misc/appledisplay.c
+@@ -83,7 +83,7 @@ struct appledisplay {
+ spinlock_t lock;
+ };
+
+-static atomic_t count_displays = ATOMIC_INIT(0);
++static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
+ static struct workqueue_struct *wq;
+
+ static void appledisplay_complete(struct urb *urb)
+@@ -281,7 +281,7 @@ static int appledisplay_probe(struct usb_interface *iface,
+
+ /* Register backlight device */
+ snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
+- atomic_inc_return(&count_displays) - 1);
++ atomic_inc_return_unchecked(&count_displays) - 1);
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = 0xff;
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 5f3bcd3..bfca43f 100644
--- a/drivers/usb/serial/console.c
@@ -46983,7 +49524,7 @@ index d6bea3e..60b250e 100644
/**
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
-index 6ef94bc..1b41265 100644
+index 028fc83..65bb105 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -296,7 +296,7 @@ out:
@@ -46995,6 +49536,28 @@ index 6ef94bc..1b41265 100644
}
/*
+diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
+index 6d78736..65be90e 100644
+--- a/drivers/vfio/vfio.c
++++ b/drivers/vfio/vfio.c
+@@ -486,7 +486,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
+ return 0;
+
+ /* TODO Prevent device auto probing */
+- WARN("Device %s added to live group %d!\n", dev_name(dev),
++ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
+ iommu_group_id(group->iommu_group));
+
+ return 0;
+@@ -506,7 +506,7 @@ static int vfio_group_nb_del_dev(struct vfio_group *group, struct device *dev)
+ if (likely(!device))
+ return 0;
+
+- WARN("Device %s removed from live group %d!\n", dev_name(dev),
++ WARN(1, "Device %s removed from live group %d!\n", dev_name(dev),
+ iommu_group_id(group->iommu_group));
+
+ vfio_device_put(device);
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 5174eba..86e764a 100644
--- a/drivers/vhost/vringh.c
@@ -47173,6 +49736,28 @@ index 098bfc6..796841d 100644
return -EINVAL;
if (!registered_fb[con2fb.framebuffer])
request_module("fb%d", con2fb.framebuffer);
+diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
+index d4d2c5f..ebbd113 100644
+--- a/drivers/video/hyperv_fb.c
++++ b/drivers/video/hyperv_fb.c
+@@ -233,7 +233,7 @@ static uint screen_fb_size;
+ static inline int synthvid_send(struct hv_device *hdev,
+ struct synthvid_msg *msg)
+ {
+- static atomic64_t request_id = ATOMIC64_INIT(0);
++ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
+ int ret;
+
+ msg->pipe_hdr.type = PIPE_MSG_DATA;
+@@ -241,7 +241,7 @@ static inline int synthvid_send(struct hv_device *hdev,
+
+ ret = vmbus_sendpacket(hdev->channel, msg,
+ msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
+- atomic64_inc_return(&request_id),
++ atomic64_inc_return_unchecked(&request_id),
+ VM_PKT_DATA_INBAND, 0);
+
+ if (ret)
diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
index 7672d2e..b56437f 100644
--- a/drivers/video/i810/i810_accel.c
@@ -50440,6 +53025,28 @@ index 370b24c..ff0be7b 100644
---help---
A.out (Assembler.OUTput) is a set of formats for libraries and
executables used in the earliest versions of UNIX. Linux used
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 789bc25..fafaeea 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
+ struct afs_vnode *vnode;
+ struct super_block *sb;
+ struct inode *inode;
+- static atomic_t afs_autocell_ino;
++ static atomic_unchecked_t afs_autocell_ino;
+
+ _enter("{%x:%u},%*.*s,",
+ AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
+@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
+ data.fid.unique = 0;
+ data.fid.vnode = 0;
+
+- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
++ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
+ afs_iget5_autocell_test, afs_iget5_set,
+ &data);
+ if (!inode) {
diff --git a/fs/aio.c b/fs/aio.c
index 2bbcacf..8614116 100644
--- a/fs/aio.c
@@ -51650,10 +54257,10 @@ index d50bbe5..af3b649 100644
goto err;
}
diff --git a/fs/bio.c b/fs/bio.c
-index 94bbc04..6fe78a4 100644
+index c5eae72..599e3cf 100644
--- a/fs/bio.c
+++ b/fs/bio.c
-@@ -1096,7 +1096,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+@@ -1106,7 +1106,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
/*
* Overflow, abort
*/
@@ -51662,7 +54269,7 @@ index 94bbc04..6fe78a4 100644
return ERR_PTR(-EINVAL);
nr_pages += end - start;
-@@ -1230,7 +1230,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
+@@ -1240,7 +1240,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
/*
* Overflow, abort
*/
@@ -51671,7 +54278,7 @@ index 94bbc04..6fe78a4 100644
return ERR_PTR(-EINVAL);
nr_pages += end - start;
-@@ -1492,7 +1492,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
+@@ -1502,7 +1502,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
const int read = bio_data_dir(bio) == READ;
struct bio_map_data *bmd = bio->bi_private;
int i;
@@ -51713,6 +54320,59 @@ index 7fb054b..ad36c67 100644
parent_start = 0;
WARN_ON(trans->transid != btrfs_header_generation(parent));
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index f26f38c..3d0f149 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -458,7 +458,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
+
+ static void finish_one_item(struct btrfs_delayed_root *delayed_root)
+ {
+- int seq = atomic_inc_return(&delayed_root->items_seq);
++ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
+ if ((atomic_dec_return(&delayed_root->items) <
+ BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
+ waitqueue_active(&delayed_root->wait))
+@@ -1391,7 +1391,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
+ static int refs_newer(struct btrfs_delayed_root *delayed_root,
+ int seq, int count)
+ {
+- int val = atomic_read(&delayed_root->items_seq);
++ int val = atomic_read_unchecked(&delayed_root->items_seq);
+
+ if (val < seq || val >= seq + count)
+ return 1;
+@@ -1408,7 +1408,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
+ if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
+ return;
+
+- seq = atomic_read(&delayed_root->items_seq);
++ seq = atomic_read_unchecked(&delayed_root->items_seq);
+
+ if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
+ int ret;
+diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
+index 1d5c5f7..0ba0afc 100644
+--- a/fs/btrfs/delayed-inode.h
++++ b/fs/btrfs/delayed-inode.h
+@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
+ */
+ struct list_head prepare_list;
+ atomic_t items; /* for delayed items */
+- atomic_t items_seq; /* for delayed items */
++ atomic_unchecked_t items_seq; /* for delayed items */
+ int nodes; /* for delayed nodes */
+ wait_queue_head_t wait;
+ };
+@@ -87,7 +87,7 @@ static inline void btrfs_init_delayed_root(
+ struct btrfs_delayed_root *delayed_root)
+ {
+ atomic_set(&delayed_root->items, 0);
+- atomic_set(&delayed_root->items_seq, 0);
++ atomic_set_unchecked(&delayed_root->items_seq, 0);
+ delayed_root->nodes = 0;
+ spin_lock_init(&delayed_root->lock);
+ init_waitqueue_head(&delayed_root->wait);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0f81d67..0ad55fe 100644
--- a/fs/btrfs/ioctl.c
@@ -51934,6 +54594,28 @@ index f02d82b..2632cf86 100644
int err;
u32 ftype;
struct ceph_mds_reply_info_parsed *rinfo;
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 7d377c9..3fb6559 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -839,7 +839,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
+ /*
+ * construct our own bdi so we can control readahead, etc.
+ */
+-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
++static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
+
+ static int ceph_register_bdi(struct super_block *sb,
+ struct ceph_fs_client *fsc)
+@@ -856,7 +856,7 @@ static int ceph_register_bdi(struct super_block *sb,
+ default_backing_dev_info.ra_pages;
+
+ err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
+- atomic_long_inc_return(&bdi_seq));
++ atomic_long_inc_return_unchecked(&bdi_seq));
+ if (!err)
+ sb->s_bdi = &fsc->backing_dev_info;
+ return err;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index d597483..747901b 100644
--- a/fs/cifs/cifs_debug.c
@@ -52823,7 +55505,7 @@ index e4141f2..d8263e8 100644
i += packet_length_size;
if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
diff --git a/fs/exec.c b/fs/exec.c
-index ffd7a81..3c84660 100644
+index 1f44670..3c84660 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -55,8 +55,20 @@
@@ -53030,24 +55712,6 @@ index ffd7a81..3c84660 100644
/*
* cover the whole range: [new_start, old_end)
*/
-@@ -607,7 +653,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
- return -ENOMEM;
-
- lru_add_drain();
-- tlb_gather_mmu(&tlb, mm, 0);
-+ tlb_gather_mmu(&tlb, mm, old_start, old_end);
- if (new_end > old_start) {
- /*
- * when the old and new regions overlap clear from new_end.
-@@ -624,7 +670,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
- free_pgd_range(&tlb, old_start, old_end, new_end,
- vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
- }
-- tlb_finish_mmu(&tlb, new_end, old_end);
-+ tlb_finish_mmu(&tlb, old_start, old_end);
-
- /*
- * Shrink the vma to just the new range. Always succeeds.
@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
stack_top = arch_align_stack(stack_top);
stack_top = PAGE_ALIGN(stack_top);
@@ -54055,9 +56719,18 @@ index d8ac61d..79a36f0 100644
.seq = SEQCNT_ZERO,
.umask = 0022,
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
-index e2cba1f..17a25bb 100644
+index e2cba1f..20319c5 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
+@@ -19,7 +19,7 @@
+
+ struct kmem_cache *fscache_cookie_jar;
+
+-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
++static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
+
+ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
+ static int fscache_alloc_object(struct fscache_cache *cache,
@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
parent ? (char *) parent->def->name : "<no-parent>",
def->name, netfs_data);
@@ -54123,7 +56796,7 @@ index e2cba1f..17a25bb 100644
_leave(" = -ENOMEDIUM [no cache]");
return -ENOMEDIUM;
}
-@@ -255,12 +255,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
+@@ -255,14 +255,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
object = cache->ops->alloc_object(cache, cookie);
fscache_stat_d(&fscache_n_cop_alloc_object);
if (IS_ERR(object)) {
@@ -54136,8 +56809,11 @@ index e2cba1f..17a25bb 100644
- fscache_stat(&fscache_n_object_alloc);
+ fscache_stat_unchecked(&fscache_n_object_alloc);
- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
+- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
++ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
+ _debug("ALLOC OBJ%x: %s {%lx}",
+ object->debug_id, cookie->def->name, object->events);
@@ -376,7 +376,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
_enter("{%s}", cookie->def->name);
@@ -56260,7 +58936,7 @@ index 9ed9361..2b72db1 100644
out:
return len;
diff --git a/fs/namespace.c b/fs/namespace.c
-index 7b1ca9b..6faeccf 100644
+index a45ba4f..44cfe66 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1265,6 +1265,9 @@ static int do_umount(struct mount *mnt, int flags)
@@ -56328,6 +59004,24 @@ index 7b1ca9b..6faeccf 100644
return retval;
}
+@@ -2344,7 +2363,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
+ * number incrementing at 10Ghz will take 12,427 years to wrap which
+ * is effectively never, so we can ignore the possibility.
+ */
+-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
++static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
+
+ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+ {
+@@ -2359,7 +2378,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+ kfree(new_ns);
+ return ERR_PTR(ret);
+ }
+- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
++ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
+ atomic_set(&new_ns->count, 1);
+ new_ns->root = NULL;
+ INIT_LIST_HEAD(&new_ns->list);
@@ -2500,8 +2519,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
}
EXPORT_SYMBOL(mount_subtree);
@@ -56668,9 +59362,18 @@ index aa411c3..c260a84 100644
"inode 0x%lx or driver bug.", vdir->i_ino);
goto err_out;
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
-index c5670b8..01a3656 100644
+index c5670b8..2b43d9b 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
+@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
+ char *addr;
+ size_t total = 0;
+ unsigned len;
+- int left;
++ unsigned left;
+
+ do {
+ len = PAGE_CACHE_SIZE - ofs;
@@ -2241,6 +2241,6 @@ const struct inode_operations ntfs_file_inode_ops = {
#endif /* NTFS_RW */
};
@@ -56680,6 +59383,37 @@ index c5670b8..01a3656 100644
-const struct inode_operations ntfs_empty_inode_ops = {};
+const struct inode_operations ntfs_empty_inode_ops __read_only;
+diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
+index 82650d5..db37dcf 100644
+--- a/fs/ntfs/super.c
++++ b/fs/ntfs/super.c
+@@ -685,7 +685,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
+ if (!silent)
+ ntfs_error(sb, "Primary boot sector is invalid.");
+ } else if (!silent)
+- ntfs_error(sb, read_err_str, "primary");
++ ntfs_error(sb, read_err_str, "%s", "primary");
+ if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
+ if (bh_primary)
+ brelse(bh_primary);
+@@ -701,7 +701,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
+ goto hotfix_primary_boot_sector;
+ brelse(bh_backup);
+ } else if (!silent)
+- ntfs_error(sb, read_err_str, "backup");
++ ntfs_error(sb, read_err_str, "%s", "backup");
+ /* Try to read NT3.51- backup boot sector. */
+ if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
+ if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
+@@ -712,7 +712,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
+ "sector.");
+ brelse(bh_backup);
+ } else if (!silent)
+- ntfs_error(sb, read_err_str, "backup");
++ ntfs_error(sb, read_err_str, "%s", "backup");
+ /* We failed. Cleanup and return. */
+ if (bh_primary)
+ brelse(bh_primary);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 20dfec7..e238cb7 100644
--- a/fs/ocfs2/aops.c
@@ -58453,7 +61187,7 @@ index 6b6a993..807cccc 100644
kfree(s);
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
-index 3e636d8..350cc48 100644
+index 65fc60a..350cc48 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -11,12 +11,19 @@
@@ -58620,34 +61354,6 @@ index 3e636d8..350cc48 100644
mss.resident >> 10,
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
mss.shared_clean >> 10,
-@@ -792,14 +843,14 @@ typedef struct {
- } pagemap_entry_t;
-
- struct pagemapread {
-- int pos, len;
-+ int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
- pagemap_entry_t *buffer;
- };
-
- #define PAGEMAP_WALK_SIZE (PMD_SIZE)
- #define PAGEMAP_WALK_MASK (PMD_MASK)
-
--#define PM_ENTRY_BYTES sizeof(u64)
-+#define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
- #define PM_STATUS_BITS 3
- #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
- #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
-@@ -1038,8 +1089,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
- if (!count)
- goto out_task;
-
-- pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
-- pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
-+ pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
-+ pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
- ret = -ENOMEM;
- if (!pm.buffer)
- goto out_task;
@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
int n;
char buffer[50];
@@ -59592,10 +62298,10 @@ index ca9ecaa..60100c7 100644
kfree(s);
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
-index 0000000..712a85d
+index 0000000..76e84b9
--- /dev/null
+++ b/grsecurity/Kconfig
-@@ -0,0 +1,1043 @@
+@@ -0,0 +1,1063 @@
+#
+# grecurity configuration
+#
@@ -60567,6 +63273,26 @@ index 0000000..712a85d
+ option with name "socket_server_gid" is created.
+
+endmenu
++
++menu "Physical Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_DENYUSB
++ bool "Deny new USB connections after toggle"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, a new sysctl option with name "deny_new_usb"
++ will be created. Setting its value to 1 will prevent any new
++ USB devices from being recognized by the OS. Any attempted USB
++ device insertion will be logged. This option is intended to be
++ used against custom USB devices designed to exploit vulnerabilities
++ in various USB device drivers.
++
++ For greatest effectiveness, this sysctl should be set after any
++ relevant init scripts. Once set, it cannot be unset.
++
++endmenu
++
+menu "Sysctl Support"
+depends on GRKERNSEC && SYSCTL
+
@@ -60641,10 +63367,10 @@ index 0000000..712a85d
+endmenu
diff --git a/grsecurity/Makefile b/grsecurity/Makefile
new file mode 100644
-index 0000000..36845aa
+index 0000000..b0b77d5
--- /dev/null
+++ b/grsecurity/Makefile
-@@ -0,0 +1,42 @@
+@@ -0,0 +1,43 @@
+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
+# during 2001-2009 it has been completely redesigned by Brad Spengler
+# into an RBAC system
@@ -60657,7 +63383,8 @@ index 0000000..36845aa
+
+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
-+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
++ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
++ grsec_usb.o
+
+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
@@ -67942,10 +70669,10 @@ index 0000000..8ca18bf
+}
diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
new file mode 100644
-index 0000000..ab2d875
+index 0000000..836f38f
--- /dev/null
+++ b/grsecurity/grsec_init.c
-@@ -0,0 +1,279 @@
+@@ -0,0 +1,280 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
@@ -67974,6 +70701,7 @@ index 0000000..ab2d875
+int grsec_enable_chdir;
+int grsec_enable_mount;
+int grsec_enable_rofs;
++int grsec_deny_new_usb;
+int grsec_enable_chroot_findtask;
+int grsec_enable_chroot_mount;
+int grsec_enable_chroot_shmat;
@@ -69341,10 +72069,10 @@ index 0000000..4030d57
+}
diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
new file mode 100644
-index 0000000..7624d1c
+index 0000000..a9e378f
--- /dev/null
+++ b/grsecurity/grsec_sysctl.c
-@@ -0,0 +1,460 @@
+@@ -0,0 +1,472 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
@@ -69365,11 +72093,12 @@ index 0000000..7624d1c
+ return 0;
+}
+
-+#ifdef CONFIG_GRKERNSEC_ROFS
-+static int __maybe_unused one = 1;
++#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
++static int __maybe_unused __read_only one = 1;
+#endif
+
-+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
++ defined(CONFIG_GRKERNSEC_DENYUSB)
+struct ctl_table grsecurity_table[] = {
+#ifdef CONFIG_GRKERNSEC_SYSCTL
+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
@@ -69802,6 +72531,17 @@ index 0000000..7624d1c
+ .extra2 = &one,
+ },
+#endif
++#ifdef CONFIG_GRKERNSEC_DENYUSB
++ {
++ .procname = "deny_new_usb",
++ .data = &grsec_deny_new_usb,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &one,
++ },
++#endif
+ { }
+};
+#endif
@@ -69906,6 +72646,27 @@ index 0000000..ee57dcf
+#endif
+ return 1;
+}
+diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
+new file mode 100644
+index 0000000..ae02d8e
+--- /dev/null
++++ b/grsecurity/grsec_usb.c
+@@ -0,0 +1,15 @@
++#include <linux/kernel.h>
++#include <linux/grinternal.h>
++#include <linux/module.h>
++
++int gr_handle_new_usb(void)
++{
++#ifdef CONFIG_GRKERNSEC_DENYUSB
++ if (grsec_deny_new_usb) {
++ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
++ return 1;
++ }
++#endif
++ return 0;
++}
++EXPORT_SYMBOL_GPL(gr_handle_new_usb);
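With GRKERNSEC_DENYUSB enabled, the sysctl entry added above and this gr_handle_new_usb() helper give administrators a one-way switch: per the Kconfig help, once deny_new_usb is set to 1 it cannot be unset, and later USB insertions are refused and logged. A small userspace sketch of how an init script's final step might latch it; the /proc path assumes the usual kernel.grsecurity sysctl namespace used by the other toggles in this patch and is not spelled out in the hunks themselves:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed location of the new sysctl node. */
            FILE *f = fopen("/proc/sys/kernel/grsecurity/deny_new_usb", "w");

            if (!f) {
                    perror("deny_new_usb");
                    return 1;
            }
            fputs("1\n", f);   /* one-way: only the value 1 is accepted */
            fclose(f);
            return 0;
    }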
diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
new file mode 100644
index 0000000..9f7b1ac
@@ -70560,19 +73321,6 @@ index a59ff51..2594a70 100644
#endif /* CONFIG_MMU */
#endif /* !__ASSEMBLY__ */
-diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
-index 13821c3..5672d7e 100644
---- a/include/asm-generic/tlb.h
-+++ b/include/asm-generic/tlb.h
-@@ -112,7 +112,7 @@ struct mmu_gather {
-
- #define HAVE_GENERIC_MMU_GATHER
-
--void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
-+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
- void tlb_flush_mmu(struct mmu_gather *tlb);
- void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
- unsigned long end);
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index c184aa8..d049942 100644
--- a/include/asm-generic/uaccess.h
@@ -72305,10 +75053,10 @@ index 0000000..be66033
+#endif
diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
new file mode 100644
-index 0000000..fd8598b
+index 0000000..e337683
--- /dev/null
+++ b/include/linux/grinternal.h
-@@ -0,0 +1,228 @@
+@@ -0,0 +1,229 @@
+#ifndef __GRINTERNAL_H
+#define __GRINTERNAL_H
+
@@ -72357,6 +75105,7 @@ index 0000000..fd8598b
+extern int grsec_enable_forkfail;
+extern int grsec_enable_time;
+extern int grsec_enable_rofs;
++extern int grsec_deny_new_usb;
+extern int grsec_enable_chroot_shmat;
+extern int grsec_enable_chroot_mount;
+extern int grsec_enable_chroot_double;
@@ -72658,10 +75407,10 @@ index 0000000..a4396b5
+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
new file mode 100644
-index 0000000..3676b0b
+index 0000000..d6f5a21
--- /dev/null
+++ b/include/linux/grsecurity.h
-@@ -0,0 +1,242 @@
+@@ -0,0 +1,244 @@
+#ifndef GR_SECURITY_H
+#define GR_SECURITY_H
+#include <linux/fs.h>
@@ -72683,6 +75432,8 @@ index 0000000..3676b0b
+#error "CONFIG_PAX enabled, but no PaX options are enabled."
+#endif
+
++int gr_handle_new_usb(void);
++
+void gr_handle_brute_attach(unsigned long mm_flags);
+void gr_handle_brute_check(void);
+void gr_handle_kernel_exploit(void);
@@ -72929,6 +75680,35 @@ index 0000000..e7ffaaf
+ const int protocol);
+
+#endif
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 0c48991..76e41d8 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -393,10 +393,12 @@ struct hid_report {
+ struct hid_device *device; /* associated device */
+ };
+
++#define HID_MAX_IDS 256
++
+ struct hid_report_enum {
+ unsigned numbered;
+ struct list_head report_list;
+- struct hid_report *report_id_hash[256];
++ struct hid_report *report_id_hash[HID_MAX_IDS];
+ };
+
+ #define HID_REPORT_TYPES 3
+@@ -747,6 +749,10 @@ void hid_output_report(struct hid_report *report, __u8 *data);
+ struct hid_device *hid_allocate_device(void);
+ struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
+ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
++struct hid_report *hid_validate_report(struct hid_device *hid,
++ unsigned int type, unsigned int id,
++ unsigned int fields,
++ unsigned int report_counts);
+ int hid_open_report(struct hid_device *device);
+ int hid_check_keys_pressed(struct hid_device *hid);
+ int hid_connect(struct hid_device *hid, unsigned int connect_mask);
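The new HID_MAX_IDS bound and the hid_validate_report() declaration above back the HID report-validation hardening carried elsewhere in this patch: a driver is expected to confirm that a report of a given type and id really provides the fields and value counts it is about to dereference. A hypothetical caller, sketched only from the signature declared above; the driver context, report id and counts are illustrative assumptions, not taken from the patch:

    #include <linux/hid.h>
    #include <linux/errno.h>

    static int example_probe_check(struct hid_device *hdev)
    {
            struct hid_report *report;

            /* Require output report 0 to expose at least one field with
             * at least one value before dereferencing it. */
            report = hid_validate_report(hdev, HID_OUTPUT_REPORT, 0, 1, 1);
            if (!report)
                    return -ENODEV;
            return 0;
    }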
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 7fb31da..08b5114 100644
--- a/include/linux/highmem.h
@@ -73690,7 +76470,7 @@ index e0c8528..bcf0c29 100644
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index ace9a5f..81bdb59 100644
+index 4a189ba..04101d6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -289,6 +289,8 @@ struct vm_area_struct {
@@ -73702,7 +76482,7 @@ index ace9a5f..81bdb59 100644
};
struct core_thread {
-@@ -437,6 +439,24 @@ struct mm_struct {
+@@ -438,6 +440,24 @@ struct mm_struct {
int first_nid;
#endif
struct uprobes_state uprobes_state;
@@ -74638,7 +77418,7 @@ index 6dacb93..6174423 100644
static inline void anon_vma_merge(struct vm_area_struct *vma,
struct vm_area_struct *next)
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 178a8d9..450bf11 100644
+index 178a8d9..918ea01 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -62,6 +62,7 @@ struct bio_list;
@@ -74658,7 +77438,7 @@ index 178a8d9..450bf11 100644
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
-@@ -314,6 +315,19 @@ struct nsproxy;
+@@ -314,6 +315,18 @@ struct nsproxy;
struct user_namespace;
#ifdef CONFIG_MMU
@@ -74674,11 +77454,10 @@ index 178a8d9..450bf11 100644
+
+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
-+
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
-@@ -591,6 +605,17 @@ struct signal_struct {
+@@ -591,6 +604,17 @@ struct signal_struct {
#ifdef CONFIG_TASKSTATS
struct taskstats *stats;
#endif
@@ -74696,7 +77475,7 @@ index 178a8d9..450bf11 100644
#ifdef CONFIG_AUDIT
unsigned audit_tty;
unsigned audit_tty_log_passwd;
-@@ -671,6 +696,14 @@ struct user_struct {
+@@ -671,6 +695,14 @@ struct user_struct {
struct key *session_keyring; /* UID's default session keyring */
#endif
@@ -74711,7 +77490,7 @@ index 178a8d9..450bf11 100644
/* Hash table maintenance information */
struct hlist_node uidhash_node;
kuid_t uid;
-@@ -1158,8 +1191,8 @@ struct task_struct {
+@@ -1158,8 +1190,8 @@ struct task_struct {
struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
@@ -74722,7 +77501,7 @@ index 178a8d9..450bf11 100644
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
-@@ -1184,11 +1217,6 @@ struct task_struct {
+@@ -1184,11 +1216,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -74734,7 +77513,7 @@ index 178a8d9..450bf11 100644
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
-@@ -1205,6 +1233,10 @@ struct task_struct {
+@@ -1205,6 +1232,10 @@ struct task_struct {
#endif
/* CPU-specific state of this task */
struct thread_struct thread;
@@ -74745,7 +77524,7 @@ index 178a8d9..450bf11 100644
/* filesystem information */
struct fs_struct *fs;
/* open file information */
-@@ -1278,6 +1310,10 @@ struct task_struct {
+@@ -1278,6 +1309,10 @@ struct task_struct {
gfp_t lockdep_reclaim_gfp;
#endif
@@ -74756,7 +77535,7 @@ index 178a8d9..450bf11 100644
/* journalling filesystem info */
void *journal_info;
-@@ -1316,6 +1352,10 @@ struct task_struct {
+@@ -1316,6 +1351,10 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
@@ -74767,7 +77546,7 @@ index 178a8d9..450bf11 100644
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
-@@ -1416,8 +1456,76 @@ struct task_struct {
+@@ -1416,8 +1455,76 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -74844,7 +77623,7 @@ index 178a8d9..450bf11 100644
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-@@ -1476,7 +1584,7 @@ struct pid_namespace;
+@@ -1476,7 +1583,7 @@ struct pid_namespace;
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
struct pid_namespace *ns);
@@ -74853,7 +77632,7 @@ index 178a8d9..450bf11 100644
{
return tsk->pid;
}
-@@ -1919,7 +2027,9 @@ void yield(void);
+@@ -1919,7 +2026,9 @@ void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
@@ -74863,7 +77642,7 @@ index 178a8d9..450bf11 100644
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
-@@ -1952,6 +2062,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -1952,6 +2061,7 @@ extern struct pid_namespace init_pid_ns;
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -74871,7 +77650,7 @@ index 178a8d9..450bf11 100644
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
-@@ -2118,7 +2229,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2118,7 +2228,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -74880,7 +77659,7 @@ index 178a8d9..450bf11 100644
extern int allow_signal(int);
extern int disallow_signal(int);
-@@ -2309,9 +2420,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2309,9 +2419,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#endif
@@ -75446,7 +78225,7 @@ index a5ffd32..0935dea 100644
extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
-index 4147d70..d356a10 100644
+index 84662ec..d8f8adb 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -97,8 +97,12 @@ struct sigaltstack;
@@ -75951,6 +78730,25 @@ index c586679..f06b389 100644
}
static inline void __dec_zone_page_state(struct page *page,
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 623488f..44b5742 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -410,11 +410,11 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
+ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
+
+ #define create_workqueue(name) \
+- alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
++ alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
+ #define create_freezable_workqueue(name) \
+- alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
++ alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))
+ #define create_singlethread_workqueue(name) \
+- alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
++ alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))
+
+ extern void destroy_workqueue(struct workqueue_struct *wq);
+
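The create_workqueue() changes above are a format-string hardening pattern that appears several times in this patch (the ntfs_error() and bdi_register() hunks follow the same rule): a variable string must not be passed where a printf-style format is expected, because any '%' it contains would be interpreted, so the workqueue name is demoted to an argument behind a fixed "%s" format. A runnable userspace illustration of the same rule, unrelated to any specific kernel API:

    #include <stdio.h>

    int main(void)
    {
            const char *name = "queue-%s-%n";   /* pretend this is attacker-influenced */

            /* printf(name); */                 /* unsafe: name becomes the format */
            printf("%s\n", name);               /* hardened: name is only data */
            return 0;
    }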
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index fdbafc6..49dfe4f 100644
--- a/include/linux/xattr.h
@@ -76008,6 +78806,19 @@ index 95d1c91..6798cca 100644
/*
* Newer version of video_device, handled by videodev2.c
+diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
+index c9b1593..a572459 100644
+--- a/include/media/v4l2-device.h
++++ b/include/media/v4l2-device.h
+@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
+ this function returns 0. If the name ends with a digit (e.g. cx18),
+ then the name will be set to cx18-0 since cx180 looks really odd. */
+ int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
+- atomic_t *instance);
++ atomic_unchecked_t *instance);
+
+ /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
+ Since the parent disappears this ensures that v4l2_dev doesn't have an
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index adcbb20..62c2559 100644
--- a/include/net/9p/transport.h
@@ -76110,7 +78921,7 @@ index de2c785..0588a6b 100644
/** inet_connection_sock - INET connection oriented sock
*
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
-index 53f464d..ba76aaa 100644
+index 53f464d..0bd0b49 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -47,8 +47,8 @@ struct inet_peer {
@@ -76124,20 +78935,28 @@ index 53f464d..ba76aaa 100644
};
struct rcu_head rcu;
struct inet_peer *gc_next;
-@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
+@@ -178,16 +178,13 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
+ /* can be called with or without local BH being disabled */
+ static inline int inet_getid(struct inet_peer *p, int more)
+ {
+- int old, new;
++ int id;
more++;
inet_peer_refcheck(p);
- do {
+- do {
- old = atomic_read(&p->ip_id_count);
-+ old = atomic_read_unchecked(&p->ip_id_count);
- new = old + more;
- if (!new)
- new = 1;
+- new = old + more;
+- if (!new)
+- new = 1;
- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
-+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
- return new;
+- return new;
++ id = atomic_add_return_unchecked(more, &p->ip_id_count);
++ if (!id)
++ id = atomic_inc_return_unchecked(&p->ip_id_count);
++ return id;
}
+ #endif /* _NET_INETPEER_H */
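The inet_getid() rework above drops the compare-and-swap retry loop in favour of a single atomic add on the unchecked counter, then bumps once more if the result happens to be zero, so the function still never returns 0. A runnable userspace analogue using C11 atomics; atomic_fetch_add returns the old value, so the added amount is folded back in to mimic the kernel's add_return semantics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int ip_id_count;

    static int get_id(int more)
    {
            int id = atomic_fetch_add(&ip_id_count, more) + more;  /* add_return */

            if (!id)
                    id = atomic_fetch_add(&ip_id_count, 1) + 1;    /* inc_return */
            return id;
    }

    int main(void)
    {
            printf("%d\n", get_id(1));
            printf("%d\n", get_id(2));
            printf("%d\n", get_id(1));
            return 0;
    }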
diff --git a/include/net/ip.h b/include/net/ip.h
index a68f838..74518ab 100644
--- a/include/net/ip.h
@@ -78632,7 +81451,7 @@ index e76e495..cbfe63a 100644
/*
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
-index ca65997..cc8cee4 100644
+index ca65997..60df03d 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
@@ -78640,11 +81459,12 @@ index ca65997..cc8cee4 100644
}
-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
+-static inline unsigned int \
+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
- static inline unsigned int \
++static inline unsigned long \
func_name(struct perf_output_handle *handle, \
- const void *buf, unsigned int len) \
-+ const void user *buf, unsigned int len) \
++ const void user *buf, unsigned long len) \
{ \
unsigned long size, written; \
\
@@ -78669,6 +81489,19 @@ index ca65997..cc8cee4 100644
/* Callchain handling */
extern struct perf_callchain_entry *
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index f356974..cb8c570 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1556,7 +1556,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
+ {
+ struct page *page;
+ uprobe_opcode_t opcode;
+- int result;
++ long result;
+
+ pagefault_disable();
+ result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
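The uprobes hunk above widens the variable receiving __copy_from_user_inatomic()'s return value from int to long; the futex, filemap and ntfs hunks in this section make the same kind of change (int to unsigned or size_t) for residual byte counts. The common point is that a count defined as unsigned long should not be funnelled through a narrower or signed type, where it can truncate or go negative and defeat the follow-up checks. A runnable userspace illustration of the truncation, assuming an LP64 target where size_t is 64-bit:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            size_t left = ((size_t)1 << 32) + 5;  /* a residual count wider than int */
            int left_int = (int)left;             /* the old, narrow type */

            printf("as size_t: %zu, as int: %d\n", left, left_int);
            return 0;
    }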
diff --git a/kernel/exit.c b/kernel/exit.c
index 7bb73f9..d7978ed 100644
--- a/kernel/exit.c
@@ -78730,7 +81563,7 @@ index 7bb73f9..d7978ed 100644
{
struct signal_struct *sig = current->signal;
diff --git a/kernel/fork.c b/kernel/fork.c
-index 987b28a..11ee8a5 100644
+index ffbc090..08ceeee 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
@@ -79025,7 +81858,7 @@ index 987b28a..11ee8a5 100644
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
-@@ -1723,7 +1785,7 @@ void __init proc_caches_init(void)
+@@ -1729,7 +1791,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -79034,7 +81867,7 @@ index 987b28a..11ee8a5 100644
mmap_init();
nsproxy_cache_init();
}
-@@ -1763,7 +1825,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1769,7 +1831,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
@@ -79043,7 +81876,7 @@ index 987b28a..11ee8a5 100644
return 0;
*new_fsp = copy_fs_struct(fs);
-@@ -1875,7 +1937,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1881,7 +1943,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
@@ -79054,7 +81887,7 @@ index 987b28a..11ee8a5 100644
else
new_fs = fs;
diff --git a/kernel/futex.c b/kernel/futex.c
-index 49dacfb..5c6b450 100644
+index 49dacfb..2ac4526 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -54,6 +54,7 @@
@@ -79077,6 +81910,15 @@ index 49dacfb..5c6b450 100644
/*
* The futex address must be "naturally" aligned.
*/
+@@ -440,7 +446,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
+
+ static int get_futex_value_locked(u32 *dest, u32 __user *from)
+ {
+- int ret;
++ unsigned long ret;
+
+ pagefault_disable();
+ ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
@@ -2733,6 +2739,7 @@ static int __init futex_init(void)
{
u32 curval;
@@ -80739,7 +83581,7 @@ index 42670e9..8719c2f 100644
.clock_get = thread_cpu_clock_get,
.timer_create = thread_cpu_timer_create,
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
-index 424c2d4..a9194f7 100644
+index 424c2d4..679242f 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -43,6 +43,7 @@
@@ -80831,6 +83673,15 @@ index 424c2d4..a9194f7 100644
}
static int common_timer_create(struct k_itimer *new_timer)
+@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+ struct k_clock *kc = clockid_to_kclock(which_clock);
+ struct k_itimer *new_timer;
+ int error, new_timer_id;
+- sigevent_t event;
++ sigevent_t event = { };
+ int it_id_set = IT_ID_NOT_SET;
+
+ if (!kc)
@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
if (copy_from_user(&new_tp, tp, sizeof (*tp)))
return -EFAULT;
@@ -82044,7 +84895,7 @@ index e8b3350..d83d44e 100644
.priority = CPU_PRI_MIGRATION,
};
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index c61a614..d7f3d7e 100644
+index 03b73be..9422b9f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -831,7 +831,7 @@ void task_numa_fault(int node, int pages, bool migrated)
@@ -82056,7 +84907,7 @@ index c61a614..d7f3d7e 100644
p->mm->numa_scan_offset = 0;
}
-@@ -5686,7 +5686,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
+@@ -5687,7 +5687,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
*/
@@ -83062,10 +85913,10 @@ index b8b8560..75b1a09 100644
ret = -EIO;
bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 6c508ff..ee55a13 100644
+index f23449d..b8cc3a1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
-@@ -1915,12 +1915,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+@@ -1925,12 +1925,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
if (unlikely(ftrace_disabled))
return 0;
@@ -83085,7 +85936,7 @@ index 6c508ff..ee55a13 100644
}
/*
-@@ -3931,8 +3936,10 @@ static int ftrace_process_locs(struct module *mod,
+@@ -3994,8 +3999,10 @@ static int ftrace_process_locs(struct module *mod,
if (!count)
return 0;
@@ -83096,7 +85947,7 @@ index 6c508ff..ee55a13 100644
start_pg = ftrace_allocate_pages(count);
if (!start_pg)
-@@ -4655,8 +4662,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
+@@ -4718,8 +4725,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
@@ -83105,7 +85956,7 @@ index 6c508ff..ee55a13 100644
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
return 0;
-@@ -4800,6 +4805,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+@@ -4863,6 +4868,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
return NOTIFY_DONE;
}
@@ -83116,7 +85967,7 @@ index 6c508ff..ee55a13 100644
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)
{
-@@ -4813,7 +4822,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+@@ -4876,7 +4885,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
goto out;
}
@@ -83403,10 +86254,10 @@ index e444ff8..438b8f4 100644
*data_page = bpage;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 06a5bce..53ad6e7 100644
+index 0582a01..310bed1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -3347,7 +3347,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+@@ -3327,7 +3327,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
return 0;
}
@@ -83428,11 +86279,31 @@ index 51b4448..7be601f 100644
/*
* Normal trace_printk() and friends allocates special buffers
+diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
+index 26dc348..8708ca7 100644
+--- a/kernel/trace/trace_clock.c
++++ b/kernel/trace/trace_clock.c
+@@ -123,7 +123,7 @@ u64 notrace trace_clock_global(void)
+ return now;
+ }
+
+-static atomic64_t trace_counter;
++static atomic64_unchecked_t trace_counter;
+
+ /*
+ * trace_clock_counter(): simply an atomic counter.
+@@ -132,5 +132,5 @@ static atomic64_t trace_counter;
+ */
+ u64 notrace trace_clock_counter(void)
+ {
+- return atomic64_add_return(1, &trace_counter);
++ return atomic64_inc_return_unchecked(&trace_counter);
+ }
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index 6953263..2004e16 100644
+index 3d18aad..d1be0eb 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
-@@ -1748,10 +1748,6 @@ static LIST_HEAD(ftrace_module_file_list);
+@@ -1794,10 +1794,6 @@ static LIST_HEAD(ftrace_module_file_list);
struct ftrace_module_file_ops {
struct list_head list;
struct module *mod;
@@ -83443,7 +86314,7 @@ index 6953263..2004e16 100644
};
static struct ftrace_module_file_ops *
-@@ -1792,17 +1788,12 @@ trace_create_file_ops(struct module *mod)
+@@ -1838,17 +1834,12 @@ trace_create_file_ops(struct module *mod)
file_ops->mod = mod;
@@ -83467,7 +86338,7 @@ index 6953263..2004e16 100644
list_add(&file_ops->list, &ftrace_module_file_list);
-@@ -1895,8 +1886,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
+@@ -1941,8 +1932,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
struct ftrace_module_file_ops *file_ops)
{
return __trace_add_new_event(call, tr,
@@ -84318,9 +87189,18 @@ index e742d06..c56fdd8 100644
config NOMMU_INITIAL_TRIM_EXCESS
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
-index 5025174..9fc1c5c 100644
+index 5025174..9d67dcd 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
+@@ -12,7 +12,7 @@
+ #include <linux/device.h>
+ #include <trace/events/writeback.h>
+
+-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
++static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
+
+ struct backing_dev_info default_backing_dev_info = {
+ .name = "default",
@@ -515,7 +515,6 @@ EXPORT_SYMBOL(bdi_destroy);
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
unsigned int cap)
@@ -84335,12 +87215,12 @@ index 5025174..9fc1c5c 100644
- sprintf(tmp, "%.28s%s", name, "-%d");
- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
-+ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return(&bdi_seq));
++ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return_unchecked(&bdi_seq));
if (err) {
bdi_destroy(bdi);
return err;
diff --git a/mm/filemap.c b/mm/filemap.c
-index 7905fe7..e60faa8 100644
+index 7905fe7..f59502b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1766,7 +1766,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -84352,6 +87232,42 @@ index 7905fe7..e60faa8 100644
file_accessed(file);
vma->vm_ops = &generic_file_vm_ops;
return 0;
+@@ -1948,7 +1948,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
+
+ while (bytes) {
+ char __user *buf = iov->iov_base + base;
+- int copy = min(bytes, iov->iov_len - base);
++ size_t copy = min(bytes, iov->iov_len - base);
+
+ base = 0;
+ left = __copy_from_user_inatomic(vaddr, buf, copy);
+@@ -1977,7 +1977,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
+ BUG_ON(!in_atomic());
+ kaddr = kmap_atomic(page);
+ if (likely(i->nr_segs == 1)) {
+- int left;
++ size_t left;
+ char __user *buf = i->iov->iov_base + i->iov_offset;
+ left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+ copied = bytes - left;
+@@ -2005,7 +2005,7 @@ size_t iov_iter_copy_from_user(struct page *page,
+
+ kaddr = kmap(page);
+ if (likely(i->nr_segs == 1)) {
+- int left;
++ size_t left;
+ char __user *buf = i->iov->iov_base + i->iov_offset;
+ left = __copy_from_user(kaddr + offset, buf, bytes);
+ copied = bytes - left;
+@@ -2035,7 +2035,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
+ * zero-length segments (without overruning the iovec).
+ */
+ while (bytes || unlikely(i->count && !iov->iov_len)) {
+- int copy;
++ size_t copy;
+
+ copy = min(bytes, iov->iov_len - base);
+ BUG_ON(!i->count || i->count < copy);
@@ -2106,6 +2106,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
*pos = i_size_read(inode);
@@ -84405,7 +87321,7 @@ index b32b70c..e512eb0 100644
set_page_address(page, (void *)vaddr);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 5cf99bf..5c01c2f 100644
+index 7c5eb85..5c01c2f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2022,15 +2022,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
@@ -84450,15 +87366,6 @@ index 5cf99bf..5c01c2f 100644
if (ret)
goto out;
-@@ -2490,7 +2494,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-
- mm = vma->vm_mm;
-
-- tlb_gather_mmu(&tlb, mm, 0);
-+ tlb_gather_mmu(&tlb, mm, start, end);
- __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
- tlb_finish_mmu(&tlb, start, end);
- }
@@ -2545,6 +2549,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
return 1;
}
@@ -84811,39 +87718,10 @@ index ceb0c7f..b2b8e94 100644
} else {
pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
diff --git a/mm/memory.c b/mm/memory.c
-index 5e50800..7c0340f 100644
+index 5a35443..7c0340f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -211,14 +211,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
- * tear-down from @mm. The @fullmm argument is used when @mm is without
- * users and we're going to destroy the full address space (exit/execve).
- */
--void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
-+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- tlb->mm = mm;
-
-- tlb->fullmm = fullmm;
-+ /* Is it from 0 to ~0? */
-+ tlb->fullmm = !(start | (end+1));
- tlb->need_flush_all = 0;
-- tlb->start = -1UL;
-- tlb->end = 0;
-+ tlb->start = start;
-+ tlb->end = end;
- tlb->need_flush = 0;
- tlb->local.next = NULL;
- tlb->local.nr = 0;
-@@ -258,8 +259,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
- {
- struct mmu_gather_batch *batch, *next;
-
-- tlb->start = start;
-- tlb->end = end;
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
-@@ -429,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+@@ -428,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
free_pte_range(tlb, pmd, addr);
} while (pmd++, addr = next, addr != end);
@@ -84851,7 +87729,7 @@ index 5e50800..7c0340f 100644
start &= PUD_MASK;
if (start < floor)
return;
-@@ -443,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+@@ -442,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
@@ -84860,7 +87738,7 @@ index 5e50800..7c0340f 100644
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-@@ -462,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -461,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
free_pmd_range(tlb, pud, addr, next, floor, ceiling);
} while (pud++, addr = next, addr != end);
@@ -84868,7 +87746,7 @@ index 5e50800..7c0340f 100644
start &= PGDIR_MASK;
if (start < floor)
return;
-@@ -476,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -475,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud, start);
@@ -84877,65 +87755,7 @@ index 5e50800..7c0340f 100644
}
/*
-@@ -1101,7 +1106,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
- spinlock_t *ptl;
- pte_t *start_pte;
- pte_t *pte;
-- unsigned long range_start = addr;
-
- again:
- init_rss_vec(rss);
-@@ -1204,17 +1208,25 @@ again:
- * and page-free while holding it.
- */
- if (force_flush) {
-+ unsigned long old_end;
-+
- force_flush = 0;
-
--#ifdef HAVE_GENERIC_MMU_GATHER
-- tlb->start = range_start;
-+ /*
-+ * Flush the TLB just for the previous segment,
-+ * then update the range to be the remaining
-+ * TLB range.
-+ */
-+ old_end = tlb->end;
- tlb->end = addr;
--#endif
-+
- tlb_flush_mmu(tlb);
-- if (addr != end) {
-- range_start = addr;
-+
-+ tlb->start = addr;
-+ tlb->end = old_end;
-+
-+ if (addr != end)
- goto again;
-- }
- }
-
- return addr;
-@@ -1399,7 +1411,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end = start + size;
-
- lru_add_drain();
-- tlb_gather_mmu(&tlb, mm, 0);
-+ tlb_gather_mmu(&tlb, mm, start, end);
- update_hiwater_rss(mm);
- mmu_notifier_invalidate_range_start(mm, start, end);
- for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
-@@ -1425,7 +1437,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
- unsigned long end = address + size;
-
- lru_add_drain();
-- tlb_gather_mmu(&tlb, mm, 0);
-+ tlb_gather_mmu(&tlb, mm, address, end);
- update_hiwater_rss(mm);
- mmu_notifier_invalidate_range_start(mm, address, end);
- unmap_single_vma(&tlb, vma, address, end, details);
-@@ -1638,12 +1650,6 @@ no_page_table:
+@@ -1644,12 +1650,6 @@ no_page_table:
return page;
}
@@ -84948,7 +87768,7 @@ index 5e50800..7c0340f 100644
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
-@@ -1730,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1736,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
i = 0;
@@ -84961,7 +87781,7 @@ index 5e50800..7c0340f 100644
if (!vma && in_gate_area(mm, start)) {
unsigned long pg = start & PAGE_MASK;
pgd_t *pgd;
-@@ -1782,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1788,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto next_page;
}
@@ -84970,7 +87790,7 @@ index 5e50800..7c0340f 100644
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-@@ -1811,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1817,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ret;
unsigned int fault_flags = 0;
@@ -84982,7 +87802,7 @@ index 5e50800..7c0340f 100644
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
-@@ -1895,7 +1896,7 @@ next_page:
+@@ -1901,7 +1896,7 @@ next_page:
start += page_increm * PAGE_SIZE;
nr_pages -= page_increm;
} while (nr_pages && start < vma->vm_end);
@@ -84991,7 +87811,7 @@ index 5e50800..7c0340f 100644
return i;
}
EXPORT_SYMBOL(__get_user_pages);
-@@ -2102,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2108,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -85002,7 +87822,7 @@ index 5e50800..7c0340f 100644
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
-@@ -2146,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2152,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -85024,7 +87844,7 @@ index 5e50800..7c0340f 100644
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
-@@ -2231,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -2237,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -85032,7 +87852,7 @@ index 5e50800..7c0340f 100644
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
-@@ -2478,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -2484,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
@@ -85043,7 +87863,7 @@ index 5e50800..7c0340f 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -2498,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -2504,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
@@ -85054,7 +87874,7 @@ index 5e50800..7c0340f 100644
if (!pud)
return -ENOMEM;
do {
-@@ -2586,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+@@ -2592,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
copy_user_highpage(dst, src, va, vma);
}
@@ -85241,7 +88061,7 @@ index 5e50800..7c0340f 100644
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2802,6 +3004,12 @@ gotten:
+@@ -2808,6 +3004,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -85254,7 +88074,7 @@ index 5e50800..7c0340f 100644
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2853,6 +3061,10 @@ gotten:
+@@ -2859,6 +3061,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -85265,7 +88085,7 @@ index 5e50800..7c0340f 100644
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -3128,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3134,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -85277,7 +88097,7 @@ index 5e50800..7c0340f 100644
unlock_page(page);
if (page != swapcache) {
/*
-@@ -3151,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3157,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -85289,7 +88109,7 @@ index 5e50800..7c0340f 100644
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -3170,40 +3392,6 @@ out_release:
+@@ -3176,40 +3392,6 @@ out_release:
}
/*
@@ -85330,7 +88150,7 @@ index 5e50800..7c0340f 100644
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3212,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3218,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -85363,7 +88183,7 @@ index 5e50800..7c0340f 100644
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3256,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3262,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
@@ -85375,7 +88195,7 @@ index 5e50800..7c0340f 100644
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -3263,6 +3452,12 @@ setpte:
+@@ -3269,6 +3452,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -85388,7 +88208,7 @@ index 5e50800..7c0340f 100644
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -3406,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3412,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*/
/* Only go through if we didn't race with anybody else... */
if (likely(pte_same(*page_table, orig_pte))) {
@@ -85401,7 +88221,7 @@ index 5e50800..7c0340f 100644
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
-@@ -3425,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3431,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, page_table);
@@ -85416,7 +88236,7 @@ index 5e50800..7c0340f 100644
} else {
if (cow_page)
mem_cgroup_uncharge_page(cow_page);
-@@ -3746,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
+@@ -3752,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
@@ -85429,7 +88249,7 @@ index 5e50800..7c0340f 100644
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3762,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3768,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;
@@ -85440,7 +88260,7 @@ index 5e50800..7c0340f 100644
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
-@@ -3773,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3779,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
@@ -85475,7 +88295,7 @@ index 5e50800..7c0340f 100644
retry:
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
-@@ -3871,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3877,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -85499,7 +88319,7 @@ index 5e50800..7c0340f 100644
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3901,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3907,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -85530,7 +88350,7 @@ index 5e50800..7c0340f 100644
#endif /* __PAGETABLE_PMD_FOLDED */
#if !defined(__HAVE_ARCH_GATE_AREA)
-@@ -3914,7 +4202,7 @@ static int __init gate_vma_init(void)
+@@ -3920,7 +4202,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -85539,7 +88359,7 @@ index 5e50800..7c0340f 100644
return 0;
}
-@@ -4048,8 +4336,8 @@ out:
+@@ -4054,8 +4336,8 @@ out:
return ret;
}
@@ -85550,7 +88370,7 @@ index 5e50800..7c0340f 100644
{
resource_size_t phys_addr;
unsigned long prot = 0;
-@@ -4074,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+@@ -4080,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
@@ -85561,7 +88381,7 @@ index 5e50800..7c0340f 100644
{
struct vm_area_struct *vma;
void *old_buf = buf;
-@@ -4083,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4089,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
@@ -85570,7 +88390,7 @@ index 5e50800..7c0340f 100644
void *maddr;
struct page *page = NULL;
-@@ -4142,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4148,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*
* The caller must hold a reference on @mm.
*/
@@ -85581,7 +88401,7 @@ index 5e50800..7c0340f 100644
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
-@@ -4153,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -4159,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
@@ -85758,7 +88578,7 @@ index 79b7cf7..9944291 100644
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff --git a/mm/mmap.c b/mm/mmap.c
-index 7dbe397..bfb7626 100644
+index 8d25fdc..bfb7626 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -36,6 +36,7 @@
@@ -86627,15 +89447,6 @@ index 7dbe397..bfb7626 100644
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
-@@ -2356,7 +2728,7 @@ static void unmap_region(struct mm_struct *mm,
- struct mmu_gather tlb;
-
- lru_add_drain();
-- tlb_gather_mmu(&tlb, mm, 0);
-+ tlb_gather_mmu(&tlb, mm, start, end);
- update_hiwater_rss(mm);
- unmap_vmas(&tlb, vma, start, end);
- free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2379,6 +2751,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
@@ -86941,15 +89752,6 @@ index 7dbe397..bfb7626 100644
return addr;
}
-@@ -2735,7 +3232,7 @@ void exit_mmap(struct mm_struct *mm)
-
- lru_add_drain();
- flush_cache_mm(mm);
-- tlb_gather_mmu(&tlb, mm, 1);
-+ tlb_gather_mmu(&tlb, mm, 0, -1);
- /* update_hiwater_rss(mm) here? but nobody should be looking */
- /* Use -1 here to ensure all VMAs in the mm are unmapped */
- unmap_vmas(&tlb, vma, 0, -1);
@@ -2750,6 +3247,7 @@ void exit_mmap(struct mm_struct *mm)
while (vma) {
if (vma->vm_flags & VM_ACCOUNT)
@@ -89972,6 +92774,28 @@ index 3ee690e..00d581b 100644
register_netdevice_notifier(&notifier);
if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index eb0a46a..5f3bae8 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -186,7 +186,7 @@ static void con_fault(struct ceph_connection *con);
+ #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
+
+ static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
+-static atomic_t addr_str_seq = ATOMIC_INIT(0);
++static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
+
+ static struct page *zero_page; /* used in certain error cases */
+
+@@ -197,7 +197,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
+ struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
+ struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
+
+- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
++ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
+ s = addr_str[i];
+
+ switch (ss->ss_family) {
diff --git a/net/compat.c b/net/compat.c
index f0a1ba6..0541331 100644
--- a/net/compat.c
@@ -90407,6 +93231,28 @@ index f9765203..9feaef8 100644
mutex_unlock(&net_mutex);
return error;
}
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index cec074b..a53a938 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -428,7 +428,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
+ struct udphdr *udph;
+ struct iphdr *iph;
+ struct ethhdr *eth;
+- static atomic_t ip_ident;
++ static atomic_unchecked_t ip_ident;
+ struct ipv6hdr *ip6h;
+
+ udp_len = len + sizeof(*udph);
+@@ -499,7 +499,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
+ put_unaligned(0x45, (unsigned char *)iph);
+ iph->tos = 0;
+ put_unaligned(htons(ip_len), &(iph->tot_len));
+- iph->id = htons(atomic_inc_return(&ip_ident));
++ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
+ iph->frag_off = 0;
+ iph->ttl = 64;
+ iph->protocol = IPPROTO_UDP;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a08bd2b..c59bd7c 100644
--- a/net/core/rtnetlink.c
@@ -90456,9 +93302,18 @@ index a08bd2b..c59bd7c 100644
if (extfilt)
filter_mask = nla_get_u32(extfilt);
diff --git a/net/core/scm.c b/net/core/scm.c
-index 03795d0..eaf7368 100644
+index 03795d0..98d6bdb 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
+@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
+ return -EINVAL;
+
+ if ((creds->pid == task_tgid_vnr(current) ||
+- ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
++ ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
+ ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
+ uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
+ ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
{
@@ -90770,6 +93625,19 @@ index a55eecc..dd8428c 100644
return -EFAULT;
*lenp = len;
+diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
+index 55e1fd5..fd602b8 100644
+--- a/net/ieee802154/6lowpan.c
++++ b/net/ieee802154/6lowpan.c
+@@ -459,7 +459,7 @@ static int lowpan_header_create(struct sk_buff *skb,
+ hc06_ptr += 3;
+ } else {
+ /* compress nothing */
+- memcpy(hc06_ptr, &hdr, 4);
++ memcpy(hc06_ptr, hdr, 4);
+ /* replace the top byte with new ECN | DSCP format */
+ *hc06_ptr = tmp;
+ hc06_ptr += 4;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d01be2a..8976537 100644
--- a/net/ipv4/af_inet.c
@@ -91932,7 +94800,7 @@ index 9a459be..086b866 100644
return -ENOMEM;
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
-index fb8c94c..fb18024 100644
+index fb8c94c..80a31d8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -621,7 +621,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
@@ -91944,7 +94812,24 @@ index fb8c94c..fb18024 100644
net->dev_base_seq;
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
-@@ -2380,7 +2380,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
+@@ -1124,12 +1124,10 @@ retry:
+ if (ifp->flags & IFA_F_OPTIMISTIC)
+ addr_flags |= IFA_F_OPTIMISTIC;
+
+- ift = !max_addresses ||
+- ipv6_count_addresses(idev) < max_addresses ?
+- ipv6_add_addr(idev, &addr, tmp_plen,
+- ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
+- addr_flags) : NULL;
+- if (IS_ERR_OR_NULL(ift)) {
++ ift = ipv6_add_addr(idev, &addr, tmp_plen,
++ ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
++ addr_flags);
++ if (IS_ERR(ift)) {
+ in6_ifa_put(ifp);
+ in6_dev_put(idev);
+ pr_info("%s: retry temporary address regeneration\n", __func__);
+@@ -2380,7 +2378,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
p.iph.ihl = 5;
p.iph.protocol = IPPROTO_IPV6;
p.iph.ttl = 64;
@@ -91953,7 +94838,7 @@ index fb8c94c..fb18024 100644
if (ops->ndo_do_ioctl) {
mm_segment_t oldfs = get_fs();
-@@ -4002,7 +4002,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
+@@ -4002,7 +4000,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
s_ip_idx = ip_idx = cb->args[2];
rcu_read_lock();
@@ -91962,7 +94847,7 @@ index fb8c94c..fb18024 100644
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
-@@ -4587,7 +4587,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+@@ -4587,7 +4585,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
dst_free(&ifp->rt->dst);
break;
}
@@ -91971,7 +94856,7 @@ index fb8c94c..fb18024 100644
}
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
-@@ -4607,7 +4607,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
+@@ -4607,7 +4605,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
int *valp = ctl->data;
int val = *valp;
loff_t pos = *ppos;
@@ -91980,7 +94865,7 @@ index fb8c94c..fb18024 100644
int ret;
/*
-@@ -4689,7 +4689,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
+@@ -4689,7 +4687,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
int *valp = ctl->data;
int val = *valp;
loff_t pos = *ppos;
@@ -92181,10 +95066,52 @@ index dffdc1a..ccc6678 100644
err_alloc:
return -ENOMEM;
}
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index c2e73e6..12cca6f 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -8,8 +8,8 @@
+
+ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+ {
+- static atomic_t ipv6_fragmentation_id;
+- int old, new;
++ static atomic_unchecked_t ipv6_fragmentation_id;
++ int id;
+
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (rt && !(rt->dst.flags & DST_NOPEER)) {
+@@ -25,13 +25,10 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+ }
+ }
+ #endif
+- do {
+- old = atomic_read(&ipv6_fragmentation_id);
+- new = old + 1;
+- if (!new)
+- new = 1;
+- } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
+- fhdr->identification = htonl(new);
++ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
++ if (!id)
++ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
++ fhdr->identification = htonl(id);
+ }
+ EXPORT_SYMBOL(ipv6_select_ident);
+
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
-index eedff8c..6e13a47 100644
+index eedff8c..7d7e24a 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
+@@ -108,7 +108,7 @@ found:
+ */
+ static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
+ {
+- struct icmp6hdr *_hdr;
++ struct icmp6hdr _hdr;
+ const struct icmp6hdr *hdr;
+
+ hdr = skb_header_pointer(skb, skb_transport_offset(skb),
@@ -378,7 +378,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
@@ -92767,7 +95694,7 @@ index 514e90f..56f22bf 100644
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
-index 8a7bfc4..4407cd0 100644
+index 8a7bfc4..be07e86 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
@@ -92779,6 +95706,15 @@ index 8a7bfc4..4407cd0 100644
ret = drv_config(local, changed);
/*
* Goal:
+@@ -921,7 +921,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ hw->queues = IEEE80211_MAX_QUEUES;
+
+ local->workqueue =
+- alloc_ordered_workqueue(wiphy_name(local->hw.wiphy), 0);
++ alloc_ordered_workqueue("%s", 0, wiphy_name(local->hw.wiphy));
+ if (!local->workqueue) {
+ result = -ENOMEM;
+ goto fail_workqueue;
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 3401262..d5cd68d 100644
--- a/net/mac80211/pm.c
@@ -93183,9 +96119,18 @@ index 0ab9636..cea3c6a 100644
{
if (users > 0)
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
-index a99b6c3..3841268 100644
+index a99b6c3..cb372f9 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
+@@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
+ const char *msg;
+ u_int8_t state;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ BUG_ON(dh == NULL);
+
+ state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
out_invalid:
if (LOG_INVALID(net, IPPROTO_DCCP))
@@ -93195,6 +96140,24 @@ index a99b6c3..3841268 100644
return false;
}
+@@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
+ u_int8_t type, old_state, new_state;
+ enum ct_dccp_roles role;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ BUG_ON(dh == NULL);
+ type = dh->dccph_type;
+
+@@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
+ unsigned int cscov;
+ const char *msg;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ if (dh == NULL) {
+ msg = "nf_ct_dccp: short packet ";
+ goto out_invalid;
@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
out_invalid:
@@ -93608,7 +96571,7 @@ index 57ee84d..8b99cf5 100644
);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
-index 1076fe1..8285fd7 100644
+index 1076fe1..f190285 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -310,18 +310,20 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
@@ -93649,27 +96612,6 @@ index 1076fe1..8285fd7 100644
return 0;
}
}
-@@ -789,6 +791,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
- struct net *net = sock_net(skb->sk);
- int chains_to_skip = cb->args[0];
- int fams_to_skip = cb->args[1];
-+ bool need_locking = chains_to_skip || fams_to_skip;
-+
-+ if (need_locking)
-+ genl_lock();
-
- for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
- n = 0;
-@@ -810,6 +816,9 @@ errout:
- cb->args[0] = i;
- cb->args[1] = n;
-
-+ if (need_locking)
-+ genl_unlock();
-+
- return skb->len;
- }
-
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index ec0c80f..41e1830 100644
--- a/net/netrom/af_netrom.c
@@ -94707,6 +97649,28 @@ index 4ca1526..df83e47 100644
set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 29b4ba9..f648ae7 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1144,7 +1144,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
+ uint64_t *handle)
+ {
+ struct rsc rsci, *rscp = NULL;
+- static atomic64_t ctxhctr;
++ static atomic64_unchecked_t ctxhctr = ATOMIC64_INIT(0);
+ long long ctxh;
+ struct gss_api_mech *gm = NULL;
+ time_t expiry;
+@@ -1155,7 +1155,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
+ status = -ENOMEM;
+ /* the handle needs to be just a unique id,
+ * use a static counter */
+- ctxh = atomic64_inc_return(&ctxhctr);
++ ctxh = atomic64_inc_return_unchecked(&ctxhctr);
+
+ /* make a copy for the caller */
+ *handle = ctxh;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 426f8fc..1ef9c32 100644
--- a/net/sunrpc/clnt.c
@@ -95279,7 +98243,7 @@ index ea970b8..c68edb9f 100644
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
-index 78f66fa..9286768 100644
+index 78f66fa..b93d547 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -177,12 +177,14 @@ int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
@@ -95373,6 +98337,19 @@ index 78f66fa..9286768 100644
module_put(mode->afinfo->owner);
err = 0;
}
+@@ -1486,10 +1493,10 @@ EXPORT_SYMBOL(xfrm_find_acq_byseq);
+ u32 xfrm_get_acqseq(void)
+ {
+ u32 res;
+- static atomic_t acqseq;
++ static atomic_unchecked_t acqseq;
+
+ do {
+- res = atomic_inc_return(&acqseq);
++ res = atomic_inc_return_unchecked(&acqseq);
+ } while (!res);
+
+ return res;
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 05a6e3d..6716ec9 100644
--- a/net/xfrm/xfrm_sysctl.c
@@ -95822,7 +98799,7 @@ index f5eb43d..1814de8 100644
shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
shstrtab_sec = shdr + r2(&ehdr->e_shstrndx);
diff --git a/security/Kconfig b/security/Kconfig
-index e9c6ac7..3e3f362 100644
+index e9c6ac7..c5d45c8 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -4,6 +4,959 @@
@@ -96661,7 +99638,7 @@ index e9c6ac7..3e3f362 100644
+config PAX_REFCOUNT
+ bool "Prevent various kernel object reference counter overflows"
+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC && ((ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || SPARC64 || X86)
++ depends on GRKERNSEC && ((ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || MIPS || SPARC64 || X86)
+ help
+ By saying Y here the kernel will detect and prevent overflowing
+ various (but not all) kinds of object reference counters. Such
@@ -97380,6 +100357,37 @@ index a3dce87..9ca1435 100644
}
/* Save user chosen LSM */
+diff --git a/security/selinux/avc.c b/security/selinux/avc.c
+index dad36a6..7e5ffbf 100644
+--- a/security/selinux/avc.c
++++ b/security/selinux/avc.c
+@@ -59,7 +59,7 @@ struct avc_node {
+ struct avc_cache {
+ struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
+ spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
+- atomic_t lru_hint; /* LRU hint for reclaim scan */
++ atomic_unchecked_t lru_hint; /* LRU hint for reclaim scan */
+ atomic_t active_nodes;
+ u32 latest_notif; /* latest revocation notification */
+ };
+@@ -167,7 +167,7 @@ void __init avc_init(void)
+ spin_lock_init(&avc_cache.slots_lock[i]);
+ }
+ atomic_set(&avc_cache.active_nodes, 0);
+- atomic_set(&avc_cache.lru_hint, 0);
++ atomic_set_unchecked(&avc_cache.lru_hint, 0);
+
+ avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
+ 0, SLAB_PANIC, NULL);
+@@ -242,7 +242,7 @@ static inline int avc_reclaim_node(void)
+ spinlock_t *lock;
+
+ for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
+- hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
++ hvalue = atomic_inc_return_unchecked(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
+ head = &avc_cache.slots[hvalue];
+ lock = &avc_cache.slots_lock[hvalue];
+
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 5c6f2cd..b4f945c 100644
--- a/security/selinux/hooks.c
@@ -97978,6 +100986,27 @@ index 7d8803a..559f8d0 100644
list_add(&s->list, &cs4297a_devs);
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 55108b5..d973e11 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -916,14 +916,10 @@ find_codec_preset(struct hda_codec *codec)
+ mutex_unlock(&preset_mutex);
+
+ if (mod_requested < HDA_MODREQ_MAX_COUNT) {
+- char name[32];
+ if (!mod_requested)
+- snprintf(name, sizeof(name), "snd-hda-codec-id:%08x",
+- codec->vendor_id);
++ request_module("snd-hda-codec-id:%08x", codec->vendor_id);
+ else
+- snprintf(name, sizeof(name), "snd-hda-codec-id:%04x*",
+- (codec->vendor_id >> 16) & 0xffff);
+- request_module(name);
++ request_module("snd-hda-codec-id:%04x*", (codec->vendor_id >> 16) & 0xffff);
+ mod_requested++;
+ goto again;
+ }
diff --git a/sound/pci/ymfpci/ymfpci.h b/sound/pci/ymfpci/ymfpci.h
index 4631a23..001ae57 100644
--- a/sound/pci/ymfpci/ymfpci.h
@@ -99298,10 +102327,10 @@ index 0000000..568b360
+}
diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
new file mode 100644
-index 0000000..0408e06
+index 0000000..257529f
--- /dev/null
+++ b/tools/gcc/kernexec_plugin.c
-@@ -0,0 +1,465 @@
+@@ -0,0 +1,471 @@
+/*
+ * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
@@ -99353,7 +102382,7 @@ index 0000000..0408e06
+int plugin_is_GPL_compatible;
+
+static struct plugin_info kernexec_plugin_info = {
-+ .version = "201302112000",
++ .version = "201308230150",
+ .help = "method=[bts|or]\tinstrumentation method\n"
+};
+
@@ -99504,7 +102533,7 @@ index 0000000..0408e06
+static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
+{
+ gimple assign_intptr, assign_new_fptr, call_stmt;
-+ tree intptr, old_fptr, new_fptr, kernexec_mask;
++ tree intptr, orptr, old_fptr, new_fptr, kernexec_mask;
+
+ call_stmt = gsi_stmt(*gsi);
+ old_fptr = gimple_call_fn(call_stmt);
@@ -99513,16 +102542,20 @@ index 0000000..0408e06
+ intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
+#if BUILDING_GCC_VERSION <= 4007
+ add_referenced_var(intptr);
-+ mark_sym_for_renaming(intptr);
+#endif
++ intptr = make_ssa_name(intptr, NULL);
+ assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
++ SSA_NAME_DEF_STMT(intptr) = assign_intptr;
+ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
+ update_stmt(assign_intptr);
+
+ // apply logical or to temporary unsigned long and bitmask
+ kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
+// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
-+ assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
++ orptr = fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask);
++ intptr = make_ssa_name(SSA_NAME_VAR(intptr), NULL);
++ assign_intptr = gimple_build_assign(intptr, orptr);
++ SSA_NAME_DEF_STMT(intptr) = assign_intptr;
+ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
+ update_stmt(assign_intptr);
+
@@ -99530,9 +102563,10 @@ index 0000000..0408e06
+ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
+#if BUILDING_GCC_VERSION <= 4007
+ add_referenced_var(new_fptr);
-+ mark_sym_for_renaming(new_fptr);
+#endif
++ new_fptr = make_ssa_name(new_fptr, NULL);
+ assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
++ SSA_NAME_DEF_STMT(new_fptr) = assign_new_fptr;
+ gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
+ update_stmt(assign_new_fptr);
+
@@ -99560,8 +102594,8 @@ index 0000000..0408e06
+ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
+#if BUILDING_GCC_VERSION <= 4007
+ add_referenced_var(new_fptr);
-+ mark_sym_for_renaming(new_fptr);
+#endif
++ new_fptr = make_ssa_name(new_fptr, NULL);
+
+ // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
+ input = build_tree_list(NULL_TREE, build_string(2, "0"));
@@ -99576,6 +102610,7 @@ index 0000000..0408e06
+ vec_safe_push(outputs, output);
+#endif
+ asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
++ SSA_NAME_DEF_STMT(new_fptr) = asm_or_stmt;
+ gimple_asm_set_volatile(asm_or_stmt, true);
+ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
+ update_stmt(asm_or_stmt);
@@ -99769,10 +102804,10 @@ index 0000000..0408e06
+}
diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
new file mode 100644
-index 0000000..b5395ba
+index 0000000..2ef6fd9
--- /dev/null
+++ b/tools/gcc/latent_entropy_plugin.c
-@@ -0,0 +1,327 @@
+@@ -0,0 +1,321 @@
+/*
+ * Copyright 2012-2013 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
@@ -99824,7 +102859,7 @@ index 0000000..b5395ba
+static tree latent_entropy_decl;
+
+static struct plugin_info latent_entropy_plugin_info = {
-+ .version = "201303102320",
++ .version = "201308230230",
+ .help = NULL
+};
+
@@ -99933,13 +102968,10 @@ index 0000000..b5395ba
+ op = get_op(&rhs);
+ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
+ assign = gimple_build_assign(local_entropy, addxorrol);
-+#if BUILDING_GCC_VERSION <= 4007
-+ find_referenced_vars_in(assign);
-+#endif
-+//debug_bb(bb);
+ gsi = gsi_after_labels(bb);
+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
++//debug_bb(bb);
+}
+
+static void perturb_latent_entropy(basic_block bb, tree rhs)
@@ -99952,13 +102984,14 @@ index 0000000..b5395ba
+ temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
+#if BUILDING_GCC_VERSION <= 4007
+ add_referenced_var(temp);
-+ mark_sym_for_renaming(temp);
+#endif
+
+ // 2. read...
++ temp = make_ssa_name(temp, NULL);
+ assign = gimple_build_assign(temp, latent_entropy_decl);
++ SSA_NAME_DEF_STMT(temp) = assign;
+#if BUILDING_GCC_VERSION <= 4007
-+ find_referenced_vars_in(assign);
++ add_referenced_var(latent_entropy_decl);
+#endif
+ gsi = gsi_after_labels(bb);
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
@@ -99966,18 +102999,14 @@ index 0000000..b5395ba
+
+ // 3. ...modify...
+ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
++ temp = make_ssa_name(SSA_NAME_VAR(temp), NULL);
+ assign = gimple_build_assign(temp, addxorrol);
-+#if BUILDING_GCC_VERSION <= 4007
-+ find_referenced_vars_in(assign);
-+#endif
++ SSA_NAME_DEF_STMT(temp) = assign;
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+
+ // 4. ...write latent_entropy
+ assign = gimple_build_assign(latent_entropy_decl, temp);
-+#if BUILDING_GCC_VERSION <= 4007
-+ find_referenced_vars_in(assign);
-+#endif
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+}
@@ -100028,21 +103057,21 @@ index 0000000..b5395ba
+
+ assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
+// gimple_set_location(assign, loc);
-+#if BUILDING_GCC_VERSION <= 4007
-+ find_referenced_vars_in(assign);
-+#endif
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
++//debug_bb(bb);
+ bb = bb->next_bb;
+
+ // 3. instrument each BB with an operation on the local entropy variable
+ while (bb != EXIT_BLOCK_PTR) {
+ perturb_local_entropy(bb, local_entropy);
++//debug_bb(bb);
+ bb = bb->next_bb;
+ };
+
+ // 4. mix local entropy into the global entropy variable
+ perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
++//debug_bb(EXIT_BLOCK_PTR->prev_bb);
+ return 0;
+}
+
@@ -106458,10 +109487,10 @@ index 0000000..b04803b
+alloc_dr_65495 alloc_dr 2 65495 NULL
diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
new file mode 100644
-index 0000000..9db0d0e
+index 0000000..03d0c84
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,2114 @@
+@@ -0,0 +1,2113 @@
+/*
+ * Copyright 2011, 2012, 2013 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -106551,7 +109580,7 @@ index 0000000..9db0d0e
+static void print_missing_msg(tree func, unsigned int argnum);
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20130410beta",
++ .version = "20130822beta",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
@@ -106931,7 +109960,6 @@ index 0000000..9db0d0e
+
+#if BUILDING_GCC_VERSION <= 4007
+ add_referenced_var(new_var);
-+ mark_sym_for_renaming(new_var);
+#endif
+ return new_var;
+}
diff --git a/main/linux-virt-grsec/kernelconfig.x86 b/main/linux-virt-grsec/kernelconfig.x86
index 36db38fae0..77cb6c9862 100644
--- a/main/linux-virt-grsec/kernelconfig.x86
+++ b/main/linux-virt-grsec/kernelconfig.x86
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 3.10.7 Kernel Configuration
+# Linux/x86 3.10.10 Kernel Configuration
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
@@ -3561,6 +3561,11 @@ CONFIG_GRKERNSEC_NO_SIMULT_CONNECT=y
# CONFIG_GRKERNSEC_SOCKET is not set
#
+# Physical Protections
+#
+# CONFIG_GRKERNSEC_DENYUSB is not set
+
+#
# Sysctl Support
#
CONFIG_GRKERNSEC_SYSCTL=y
diff --git a/main/linux-virt-grsec/kernelconfig.x86_64 b/main/linux-virt-grsec/kernelconfig.x86_64
index d6c571438d..a43ae25783 100644
--- a/main/linux-virt-grsec/kernelconfig.x86_64
+++ b/main/linux-virt-grsec/kernelconfig.x86_64
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 3.10.7 Kernel Configuration
+# Linux/x86 3.10.10 Kernel Configuration
#
CONFIG_64BIT=y
CONFIG_X86_64=y
@@ -3546,6 +3546,11 @@ CONFIG_GRKERNSEC_NO_SIMULT_CONNECT=y
# CONFIG_GRKERNSEC_SOCKET is not set
#
+# Physical Protections
+#
+# CONFIG_GRKERNSEC_DENYUSB is not set
+
+#
# Sysctl Support
#
CONFIG_GRKERNSEC_SYSCTL=y