author    Natanael Copa <ncopa@alpinelinux.org>  2013-04-18 07:09:44 +0000
committer Natanael Copa <ncopa@alpinelinux.org>  2013-04-18 07:09:44 +0000
commit    822b46a43c917287b186f6ecd4035a40bc50f9ba (patch)
tree      4a23024c2d880bc4ca6a900f681b5548ab32eca8
parent    5e601f16034bec4f4e258199d6e8ea0da7a93724 (diff)
main/linux-grsec: upgrade to 3.8.8
-rw-r--r--  main/linux-grsec/APKBUILD                                                     16
-rw-r--r--  main/linux-grsec/grsecurity-2.9.1-3.8.8-201304172116.patch (renamed from main/linux-grsec/grsecurity-2.9.1-3.8.7-201304142158.patch)  355
2 files changed, 154 insertions(+), 217 deletions(-)
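The APKBUILD change below bumps pkgver to 3.8.8, points source= at the matching grsecurity patch, and refreshes the md5/sha256/sha512 sums for patch-3.8.8.xz and the renamed grsecurity patch. A minimal sketch of how such sums are typically regenerated with coreutils, assuming the new source files have already been downloaded into the working directory (in practice Alpine's abuild checksum rewrites the checksum lines in the APKBUILD directly):

    # sketch only, not the committed tooling: recompute the sums that go into
    # the md5sums/sha256sums/sha512sums variables of the APKBUILD
    md5sum    patch-3.8.8.xz grsecurity-2.9.1-3.8.8-201304172116.patch
    sha256sum patch-3.8.8.xz grsecurity-2.9.1-3.8.8-201304172116.patch
    sha512sum patch-3.8.8.xz grsecurity-2.9.1-3.8.8-201304172116.patch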
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index f7579358d..0dba0bc2b 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -2,7 +2,7 @@
_flavor=grsec
pkgname=linux-${_flavor}
-pkgver=3.8.7
+pkgver=3.8.8
_kernver=3.8
pkgrel=0
pkgdesc="Linux kernel with grsecurity"
@@ -14,7 +14,7 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
- grsecurity-2.9.1-3.8.7-201304142158.patch
+ grsecurity-2.9.1-3.8.8-201304172116.patch
0004-arp-flush-arp-cache-on-device-change.patch
@@ -141,20 +141,20 @@ dev() {
}
md5sums="1c738edfc54e7c65faeb90c436104e2f linux-3.8.tar.xz
-d166692330220c425d69db82c9d693b6 patch-3.8.7.xz
-b1d5626b6cdce1037c06ace84e04acff grsecurity-2.9.1-3.8.7-201304142158.patch
+08cdcef928c2ca402adf1c444a3c43ac patch-3.8.8.xz
+51c922d1e46251ab693f87fa673380d6 grsecurity-2.9.1-3.8.8-201304172116.patch
776adeeb5272093574f8836c5037dd7d 0004-arp-flush-arp-cache-on-device-change.patch
0914bcf698bb5e1a39d2888ad2c5c442 kernelconfig.x86
477f1a2a20dd6634dfa42f4732235370 kernelconfig.x86_64"
sha256sums="e070d1bdfbded5676a4f374721c63565f1c969466c5a3e214004a136b583184b linux-3.8.tar.xz
-35596a6e1504354ce165a36b743fc14eeeae3a462a321eafca54ab1b3215f861 patch-3.8.7.xz
-eea6cedf3e2ab2d45df7d9a04113f97ed8b666d7c248bfa34c0976216535b33f grsecurity-2.9.1-3.8.7-201304142158.patch
+759313a1012f17c83af15f237f3ad8b50a45f1bb34c62409e558a4d65bf014c3 patch-3.8.8.xz
+1ab35660c7a1a33a83e331b1ec23f52f3773ff94e7fd119acd83a58fc1dd3331 grsecurity-2.9.1-3.8.8-201304172116.patch
e2d2d1503f53572c6a2e21da729a13a430dd01f510405ffb3a33b29208860bde 0004-arp-flush-arp-cache-on-device-change.patch
fea4df55c6db0a058eb24ede61473bf401a52ceb1945d5d552421847cc947160 kernelconfig.x86
6b4c04220aaecd9854ac6e889e7518c931f1c3f5f2e7c32c2c084ccfc3be911f kernelconfig.x86_64"
sha512sums="10a7983391af907d8aec72bdb096d1cabd4911985715e9ea13d35ff09095c035db15d4ab08b92eda7c10026cc27348cb9728c212335f7fcdcda7c610856ec30f linux-3.8.tar.xz
-311cb2b75671ec842c7f4f4724af5afe2a23458eb28f2199ed9a4472f7a34e10ccd1f656a4c61634a0f6606714d5d4ebd6007ea90eddbdd32d83179e4adcb242 patch-3.8.7.xz
-cf265d345fe2ba1d53b7cccddfb5a06424ca49da48a76261fa18f8e963155fbcfea99a3eb016f6a78cfb6e5477bfb97972322633cf503470f9d01592dd3b6f6c grsecurity-2.9.1-3.8.7-201304142158.patch
+dedc73b00d159a944ebc8efe961afafa64db140eca7fa1609dfea52517c60707384e633a5d05c70bb31603f6b668a8ceef1ce28eac62f8ce0fa67395265e8338 patch-3.8.8.xz
+be813a5108a42f9b1795d8b4646a492c4ccfcf6e82f984fdce2d7a4be0c1dfd8966c0f8522dd26930e23d48c46bc106e6e4c5fa35f6c008b1dca56367b5b59cd grsecurity-2.9.1-3.8.8-201304172116.patch
b6fdf376009f0f0f3fa194cb11be97343e4d394cf5d3547de6cfca8ad619c5bd3f60719331fd8cfadc47f09d22be8376ba5f871b46b24887ea73fe47e233a54e 0004-arp-flush-arp-cache-on-device-change.patch
ffb12d33f55dbc50e97156feaf65e29f6b332750e43c33ed90b2def5029d039b0b87d559483cf3a80f330dadac68f921fa276dc6cc9fbc4e60050985d823501e kernelconfig.x86
3bdc68b0b8d36b051ac543f13eba1151902e1e43e76abef8d8dcbaa6927db6365f1b091505569af8146c89e486e24647e8e96fb6b96f30a0071f59e5923950cb kernelconfig.x86_64"
diff --git a/main/linux-grsec/grsecurity-2.9.1-3.8.7-201304142158.patch b/main/linux-grsec/grsecurity-2.9.1-3.8.8-201304172116.patch
index 8cb197369..b90155e11 100644
--- a/main/linux-grsec/grsecurity-2.9.1-3.8.7-201304142158.patch
+++ b/main/linux-grsec/grsecurity-2.9.1-3.8.8-201304172116.patch
@@ -259,7 +259,7 @@ index 986614d..e8bfedc 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 85204da..9d99250 100644
+index 7684f95..12f2f86 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -14381,7 +14381,7 @@ index 320f7bb..e89f8f8 100644
extern unsigned long __phys_addr(unsigned long);
#define __phys_reloc_hide(x) (x)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
-index 5edd174..c395822 100644
+index 7361e47..16dc226 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -564,7 +564,7 @@ static inline pmd_t __pmd(pmdval_t val)
@@ -14412,7 +14412,7 @@ index 5edd174..c395822 100644
static inline void pgd_clear(pgd_t *pgdp)
{
set_pgd(pgdp, __pgd(0));
-@@ -711,6 +723,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+@@ -714,6 +726,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
pv_mmu_ops.set_fixmap(idx, phys, flags);
}
@@ -14434,7 +14434,7 @@ index 5edd174..c395822 100644
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
static inline int arch_spin_is_locked(struct arch_spinlock *lock)
-@@ -927,7 +954,7 @@ extern void default_banner(void);
+@@ -930,7 +957,7 @@ extern void default_banner(void);
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
@@ -14443,7 +14443,7 @@ index 5edd174..c395822 100644
#endif
#define INTERRUPT_RETURN \
-@@ -1002,6 +1029,21 @@ extern void default_banner(void);
+@@ -1005,6 +1032,21 @@ extern void default_banner(void);
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
CLBR_NONE, \
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
@@ -14466,7 +14466,7 @@ index 5edd174..c395822 100644
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
-index 142236e..5446ffbc 100644
+index b3b0ec1..b1cd3eb 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -84,7 +84,7 @@ struct pv_init_ops {
@@ -14478,7 +14478,7 @@ index 142236e..5446ffbc 100644
struct pv_lazy_ops {
-@@ -97,7 +97,7 @@ struct pv_time_ops {
+@@ -98,7 +98,7 @@ struct pv_time_ops {
unsigned long long (*sched_clock)(void);
unsigned long long (*steal_clock)(int cpu);
unsigned long (*get_tsc_khz)(void);
@@ -14487,7 +14487,7 @@ index 142236e..5446ffbc 100644
struct pv_cpu_ops {
/* hooks for various privileged instructions */
-@@ -191,7 +191,7 @@ struct pv_cpu_ops {
+@@ -192,7 +192,7 @@ struct pv_cpu_ops {
void (*start_context_switch)(struct task_struct *prev);
void (*end_context_switch)(struct task_struct *next);
@@ -14496,7 +14496,7 @@ index 142236e..5446ffbc 100644
struct pv_irq_ops {
/*
-@@ -222,7 +222,7 @@ struct pv_apic_ops {
+@@ -223,7 +223,7 @@ struct pv_apic_ops {
unsigned long start_eip,
unsigned long start_esp);
#endif
@@ -14505,7 +14505,7 @@ index 142236e..5446ffbc 100644
struct pv_mmu_ops {
unsigned long (*read_cr2)(void);
-@@ -312,6 +312,7 @@ struct pv_mmu_ops {
+@@ -313,6 +313,7 @@ struct pv_mmu_ops {
struct paravirt_callee_save make_pud;
void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
@@ -14513,7 +14513,7 @@ index 142236e..5446ffbc 100644
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */
-@@ -323,6 +324,12 @@ struct pv_mmu_ops {
+@@ -324,6 +325,12 @@ struct pv_mmu_ops {
an mfn. We can tell which is which from the index. */
void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
@@ -14526,7 +14526,7 @@ index 142236e..5446ffbc 100644
};
struct arch_spinlock;
-@@ -333,7 +340,7 @@ struct pv_lock_ops {
+@@ -334,7 +341,7 @@ struct pv_lock_ops {
void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
int (*spin_trylock)(struct arch_spinlock *lock);
void (*spin_unlock)(struct arch_spinlock *lock);
@@ -15914,19 +15914,6 @@ index 2d946e6..e453ec4 100644
+
#endif
#endif /* _ASM_X86_THREAD_INFO_H */
-diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
-index 4fef207..c779730 100644
---- a/arch/x86/include/asm/tlb.h
-+++ b/arch/x86/include/asm/tlb.h
-@@ -7,7 +7,7 @@
-
- #define tlb_flush(tlb) \
- { \
-- if (tlb->fullmm == 0) \
-+ if (!tlb->fullmm && !tlb->need_flush_all) \
- flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
- else \
- flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 1709801..0a60f2f 100644
--- a/arch/x86/include/asm/uaccess.h
@@ -22094,7 +22081,7 @@ index 676b8c7..870ba04 100644
.spin_is_locked = __ticket_spin_is_locked,
.spin_is_contended = __ticket_spin_is_contended,
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
-index 17fff18..5cfa0f4 100644
+index 8bfb335..c1463c6 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
@@ -22139,8 +22126,8 @@ index 17fff18..5cfa0f4 100644
return insn_len;
}
-@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
- preempt_enable();
+@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+ return this_cpu_read(paravirt_lazy_mode);
}
-struct pv_info pv_info = {
@@ -22215,7 +22202,7 @@ index 17fff18..5cfa0f4 100644
#endif
#endif /* PAGETABLE_LEVELS >= 3 */
-@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
+@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
},
.set_fixmap = native_set_fixmap,
@@ -24574,7 +24561,7 @@ index c243b81..b692af3 100644
kvm_pmu_init(vcpu);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
-index df4176c..23ce092 100644
+index 20a4fd4..d806083 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
@@ -27515,7 +27502,7 @@ index 903ec1e..c4166b2 100644
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index fb674fd..223a693 100644
+index 4f7d793..165a8be 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -13,12 +13,19 @@
@@ -27672,7 +27659,7 @@ index fb674fd..223a693 100644
pgd_ref = pgd_offset_k(address);
if (pgd_none(*pgd_ref))
return -1;
-@@ -541,7 +612,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
+@@ -543,7 +614,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
@@ -27681,7 +27668,7 @@ index fb674fd..223a693 100644
return 1;
#endif
return 0;
-@@ -568,7 +639,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
+@@ -570,7 +641,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
}
static const char nx_warning[] = KERN_CRIT
@@ -27690,7 +27677,7 @@ index fb674fd..223a693 100644
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
-@@ -577,15 +648,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -579,15 +650,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
if (!oops_may_print())
return;
@@ -27720,7 +27707,7 @@ index fb674fd..223a693 100644
printk(KERN_ALERT "BUG: unable to handle kernel ");
if (address < PAGE_SIZE)
printk(KERN_CONT "NULL pointer dereference");
-@@ -748,6 +831,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+@@ -750,6 +833,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
return;
}
#endif
@@ -27743,7 +27730,7 @@ index fb674fd..223a693 100644
/* Kernel addresses are always protection faults: */
if (address >= TASK_SIZE)
error_code |= PF_PROT;
-@@ -833,7 +932,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+@@ -835,7 +934,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
printk(KERN_ERR
"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
@@ -27752,7 +27739,7 @@ index fb674fd..223a693 100644
code = BUS_MCEERR_AR;
}
#endif
-@@ -896,6 +995,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+@@ -898,6 +997,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
return 1;
}
@@ -27852,7 +27839,7 @@ index fb674fd..223a693 100644
/*
* Handle a spurious fault caused by a stale TLB entry.
*
-@@ -968,6 +1160,9 @@ int show_unhandled_signals = 1;
+@@ -970,6 +1162,9 @@ int show_unhandled_signals = 1;
static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
@@ -27862,7 +27849,7 @@ index fb674fd..223a693 100644
if (error_code & PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
-@@ -996,7 +1191,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
+@@ -998,7 +1193,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
if (error_code & PF_USER)
return false;
@@ -27871,7 +27858,7 @@ index fb674fd..223a693 100644
return false;
return true;
-@@ -1012,18 +1207,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1014,18 +1209,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
@@ -27910,7 +27897,7 @@ index fb674fd..223a693 100644
/*
* Detect and handle instructions that would cause a page fault for
-@@ -1084,7 +1294,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1086,7 +1296,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
* User-mode registers count as a user access even for any
* potential system fault or CPU buglet:
*/
@@ -27919,7 +27906,7 @@ index fb674fd..223a693 100644
local_irq_enable();
error_code |= PF_USER;
} else {
-@@ -1146,6 +1356,11 @@ retry:
+@@ -1148,6 +1358,11 @@ retry:
might_sleep();
}
@@ -27931,7 +27918,7 @@ index fb674fd..223a693 100644
vma = find_vma(mm, address);
if (unlikely(!vma)) {
bad_area(regs, error_code, address);
-@@ -1157,18 +1372,24 @@ retry:
+@@ -1159,18 +1374,24 @@ retry:
bad_area(regs, error_code, address);
return;
}
@@ -27967,7 +27954,7 @@ index fb674fd..223a693 100644
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
-@@ -1232,3 +1453,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1234,3 +1455,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
__do_page_fault(regs, error_code);
exception_exit(regs);
}
@@ -29437,24 +29424,10 @@ index 9f0614d..92ae64a 100644
p += get_opcode(p, &opcode);
for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
-index e27fbf8..213e72b 100644
+index 395b3b4a..213e72b 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
-@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
- void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
- {
- paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
-+ /*
-+ * NOTE! For PAE, any changes to the top page-directory-pointer-table
-+ * entries need a full cr3 reload to flush.
-+ */
-+#ifdef CONFIG_X86_PAE
-+ tlb->need_flush_all = 1;
-+#endif
- tlb_remove_page(tlb, virt_to_page(pmd));
- }
-
-@@ -84,10 +91,64 @@ static inline void pgd_list_del(pgd_t *pgd)
+@@ -91,10 +91,64 @@ static inline void pgd_list_del(pgd_t *pgd)
list_del(&page->lru);
}
@@ -29521,7 +29494,7 @@ index e27fbf8..213e72b 100644
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
-@@ -128,6 +189,7 @@ static void pgd_dtor(pgd_t *pgd)
+@@ -135,6 +189,7 @@ static void pgd_dtor(pgd_t *pgd)
pgd_list_del(pgd);
spin_unlock(&pgd_lock);
}
@@ -29529,7 +29502,7 @@ index e27fbf8..213e72b 100644
/*
* List of all pgd's needed for non-PAE so it can invalidate entries
-@@ -140,7 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
+@@ -147,7 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
* -- nyc
*/
@@ -29538,7 +29511,7 @@ index e27fbf8..213e72b 100644
/*
* In PAE mode, we need to do a cr3 reload (=tlb flush) when
* updating the top-level pagetable entries to guarantee the
-@@ -152,7 +214,7 @@ static void pgd_dtor(pgd_t *pgd)
+@@ -159,7 +214,7 @@ static void pgd_dtor(pgd_t *pgd)
* not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
* and initialize the kernel pmds here.
*/
@@ -29547,7 +29520,7 @@ index e27fbf8..213e72b 100644
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
-@@ -170,36 +232,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+@@ -177,36 +232,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
*/
flush_tlb_mm(mm);
}
@@ -29597,7 +29570,7 @@ index e27fbf8..213e72b 100644
return -ENOMEM;
}
-@@ -212,51 +276,55 @@ static int preallocate_pmds(pmd_t *pmds[])
+@@ -219,51 +276,55 @@ static int preallocate_pmds(pmd_t *pmds[])
* preallocate which never got a corresponding vma will need to be
* freed manually.
*/
@@ -29670,7 +29643,7 @@ index e27fbf8..213e72b 100644
pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
-@@ -265,11 +333,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+@@ -272,11 +333,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
mm->pgd = pgd;
@@ -29684,7 +29657,7 @@ index e27fbf8..213e72b 100644
/*
* Make sure that pre-populating the pmds is atomic with
-@@ -279,14 +347,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+@@ -286,14 +347,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
spin_lock(&pgd_lock);
pgd_ctor(mm, pgd);
@@ -29702,7 +29675,7 @@ index e27fbf8..213e72b 100644
out_free_pgd:
free_page((unsigned long)pgd);
out:
-@@ -295,7 +363,7 @@ out:
+@@ -302,7 +363,7 @@ out:
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
@@ -31620,7 +31593,7 @@ index 2262003..3ee61cf 100644
};
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index 01de35c..692023f 100644
+index cab96b6..8c629ba 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1739,14 +1739,18 @@ static void *m2v(phys_addr_t maddr)
@@ -44339,7 +44312,7 @@ index da9fde8..c07975f 100644
/*
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
-index c578229..45aa9ee 100644
+index 78f1be2..3e98910 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
@@ -50789,7 +50762,7 @@ index b2a34a1..162fa69 100644
return rc;
}
diff --git a/fs/exec.c b/fs/exec.c
-index 20df02c..1b1d946 100644
+index 20df02c..c9a5bc9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -55,6 +55,17 @@
@@ -51263,7 +51236,7 @@ index 20df02c..1b1d946 100644
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
-@@ -1697,3 +1867,253 @@ asmlinkage long compat_sys_execve(const char __user * filename,
+@@ -1697,3 +1867,278 @@ asmlinkage long compat_sys_execve(const char __user * filename,
return error;
}
#endif
@@ -51475,6 +51448,24 @@ index 20df02c..1b1d946 100644
+}
+#endif
+
++#ifdef CONFIG_PAX_USERCOPY
++static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
++{
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ unsigned long textlow = ktla_ktva((unsigned long)_stext);
++ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
++#else
++ unsigned long textlow = _stext;
++ unsigned long texthigh = _etext;
++#endif
++
++ if (high < textlow || low > texthigh)
++ return false;
++ else
++ return true;
++}
++#endif
++
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+
@@ -51486,9 +51477,16 @@ index 20df02c..1b1d946 100644
+
+ type = check_heap_object(ptr, n);
+ if (!type) {
-+ if (check_stack_object(ptr, n) != -1)
++ int ret = check_stack_object(ptr, n);
++ if (ret == 1 || ret == 2)
+ return;
-+ type = "<process stack>";
++ if (ret == 0) {
++ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
++ type = "<kernel text>";
++ else
++ return;
++ } else
++ type = "<process stack>";
+ }
+
+ pax_report_usercopy(ptr, n, to_user, type);
@@ -53477,18 +53475,9 @@ index 78bde32..767e906 100644
static int can_do_hugetlb_shm(void)
{
diff --git a/fs/inode.c b/fs/inode.c
-index 14084b7..6a439ea 100644
+index b98540e..6a439ea 100644
--- a/fs/inode.c
+++ b/fs/inode.c
-@@ -725,7 +725,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
- * inode to the back of the list so we don't spin on it.
- */
- if (!spin_trylock(&inode->i_lock)) {
-- list_move_tail(&inode->i_lru, &sb->s_inode_lru);
-+ list_move(&inode->i_lru, &sb->s_inode_lru);
- continue;
- }
-
@@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
#ifdef CONFIG_SMP
@@ -67520,24 +67509,6 @@ index 5cf680a..4b74d62 100644
#endif /* CONFIG_MMU */
#endif /* !__ASSEMBLY__ */
-diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
-index 25f01d0..b1b1fa6 100644
---- a/include/asm-generic/tlb.h
-+++ b/include/asm-generic/tlb.h
-@@ -99,7 +99,12 @@ struct mmu_gather {
- unsigned int need_flush : 1, /* Did free PTEs */
- fast_mode : 1; /* No batching */
-
-- unsigned int fullmm;
-+ /* we are in the middle of an operation to clear
-+ * a full mm and can make some optimizations */
-+ unsigned int fullmm : 1,
-+ /* we have performed an operation which
-+ * requires a complete flush of the tlb */
-+ need_flush_all : 1;
-
- struct mmu_gather_batch *active;
- struct mmu_gather_batch local;
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index d1ea7ce..b1ebf2a 100644
--- a/include/asm-generic/vmlinux.lds.h
@@ -69911,6 +69882,19 @@ index f3b99e1..9b73cee 100644
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 85ac9b9b..e5759ab 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
+ int adjust_resource(struct resource *res, resource_size_t start,
+ resource_size_t size);
+ resource_size_t resource_alignment(struct resource *res);
+-static inline resource_size_t resource_size(const struct resource *res)
++static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
+ {
+ return res->end - res->start + 1;
+ }
diff --git a/include/linux/irq.h b/include/linux/irq.h
index fdf2c4a..5332486 100644
--- a/include/linux/irq.h
@@ -74224,7 +74208,7 @@ index f3f40dc..ffe5a3a 100644
if (u->mq_bytes + mq_bytes < u->mq_bytes ||
u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
diff --git a/ipc/msg.c b/ipc/msg.c
-index 31cd1bf..9778e0f8 100644
+index fede1d0..9778e0f8 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
@@ -74252,14 +74236,6 @@ index 31cd1bf..9778e0f8 100644
msg_params.key = key;
msg_params.flg = msgflg;
-@@ -872,6 +873,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
- goto out_unlock;
- break;
- }
-+ msg = ERR_PTR(-EAGAIN);
- } else
- break;
- msg_counter++;
diff --git a/ipc/sem.c b/ipc/sem.c
index 58d31f1..cce7a55 100644
--- a/ipc/sem.c
@@ -78749,7 +78725,7 @@ index 2f194e9..2c05ea9 100644
.priority = 10,
};
diff --git a/kernel/sys.c b/kernel/sys.c
-index 265b376..48b8613 100644
+index 47f1d1b..04c769e 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
@@ -78765,7 +78741,7 @@ index 265b376..48b8613 100644
no_nice = security_task_setnice(p, niceval);
if (no_nice) {
error = no_nice;
-@@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
+@@ -596,6 +602,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
goto error;
}
@@ -78775,7 +78751,7 @@ index 265b376..48b8613 100644
if (rgid != (gid_t) -1 ||
(egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
new->sgid = new->egid;
-@@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
+@@ -631,6 +640,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
old = current_cred();
retval = -EPERM;
@@ -78786,7 +78762,7 @@ index 265b376..48b8613 100644
if (nsown_capable(CAP_SETGID))
new->gid = new->egid = new->sgid = new->fsgid = kgid;
else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
-@@ -647,7 +660,7 @@ error:
+@@ -648,7 +661,7 @@ error:
/*
* change the user struct in a credentials set to match the new UID
*/
@@ -78795,7 +78771,7 @@ index 265b376..48b8613 100644
{
struct user_struct *new_user;
-@@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
+@@ -728,6 +741,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
goto error;
}
@@ -78805,7 +78781,7 @@ index 265b376..48b8613 100644
if (!uid_eq(new->uid, old->uid)) {
retval = set_user(new);
if (retval < 0)
-@@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
+@@ -778,6 +794,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
old = current_cred();
retval = -EPERM;
@@ -78818,7 +78794,7 @@ index 265b376..48b8613 100644
if (nsown_capable(CAP_SETUID)) {
new->suid = new->uid = kuid;
if (!uid_eq(kuid, old->uid)) {
-@@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
+@@ -847,6 +869,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
goto error;
}
@@ -78828,7 +78804,7 @@ index 265b376..48b8613 100644
if (ruid != (uid_t) -1) {
new->uid = kruid;
if (!uid_eq(kruid, old->uid)) {
-@@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
+@@ -929,6 +954,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
goto error;
}
@@ -78838,7 +78814,7 @@ index 265b376..48b8613 100644
if (rgid != (gid_t) -1)
new->gid = krgid;
if (egid != (gid_t) -1)
-@@ -989,12 +1017,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+@@ -990,12 +1018,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
nsown_capable(CAP_SETUID)) {
if (!uid_eq(kuid, old->fsuid)) {
@@ -78855,7 +78831,7 @@ index 265b376..48b8613 100644
abort_creds(new);
return old_fsuid;
-@@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
+@@ -1028,12 +1060,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
nsown_capable(CAP_SETGID)) {
@@ -78872,7 +78848,7 @@ index 265b376..48b8613 100644
abort_creds(new);
return old_fsgid;
-@@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
+@@ -1341,19 +1377,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
return -EFAULT;
down_read(&uts_sem);
@@ -78897,7 +78873,7 @@ index 265b376..48b8613 100644
__OLD_UTS_LEN);
error |= __put_user(0, name->machine + __OLD_UTS_LEN);
up_read(&uts_sem);
-@@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+@@ -2027,7 +2063,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
error = get_dumpable(me->mm);
break;
case PR_SET_DUMPABLE:
@@ -79458,18 +79434,10 @@ index c0bd030..62a1927 100644
ret = -EIO;
bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 64bc5d8..1ed69e2 100644
+index 35cc3a8..2a47da3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
-@@ -668,7 +668,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
- free_page(tmp);
- }
-
-- free_page((unsigned long)stat->pages);
- stat->pages = NULL;
- stat->start = NULL;
-
-@@ -1874,12 +1873,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+@@ -1886,12 +1886,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
if (unlikely(ftrace_disabled))
return 0;
@@ -79489,7 +79457,7 @@ index 64bc5d8..1ed69e2 100644
}
/*
-@@ -2965,7 +2969,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
+@@ -2964,7 +2969,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
@@ -79498,7 +79466,7 @@ index 64bc5d8..1ed69e2 100644
{
struct ftrace_func_probe *entry;
struct ftrace_page *pg;
-@@ -3832,8 +3836,10 @@ static int ftrace_process_locs(struct module *mod,
+@@ -3831,8 +3836,10 @@ static int ftrace_process_locs(struct module *mod,
if (!count)
return 0;
@@ -79509,7 +79477,7 @@ index 64bc5d8..1ed69e2 100644
start_pg = ftrace_allocate_pages(count);
if (!start_pg)
-@@ -4555,8 +4561,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
+@@ -4554,8 +4561,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
@@ -79518,7 +79486,7 @@ index 64bc5d8..1ed69e2 100644
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
return 0;
-@@ -4700,6 +4704,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+@@ -4699,6 +4704,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
return NOTIFY_DONE;
}
@@ -79529,7 +79497,7 @@ index 64bc5d8..1ed69e2 100644
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)
{
-@@ -4713,7 +4721,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+@@ -4712,7 +4721,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
goto out;
}
@@ -79968,7 +79936,7 @@ index 194d796..76edb8f 100644
key = event->type & (EVENT_HASHSIZE - 1);
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
-index 42ca822..cdcacc6 100644
+index 83a8b5b..0bf39a9 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -52,7 +52,7 @@ static inline void check_stack(void)
@@ -80319,33 +80287,10 @@ index bd2bea9..6b3c95e 100644
return false;
diff --git a/lib/kobject.c b/lib/kobject.c
-index e07ee1f..a4fd13d 100644
+index a654866..a4fd13d 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
-@@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj)
- return kobj;
- }
-
-+static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
-+{
-+ if (!kref_get_unless_zero(&kobj->kref))
-+ kobj = NULL;
-+ return kobj;
-+}
-+
- /*
- * kobject_cleanup - free kobject resources.
- * @kobj: object to cleanup
-@@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
-
- list_for_each_entry(k, &kset->list, entry) {
- if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
-- ret = kobject_get(k);
-+ ret = kobject_get_unless_zero(k);
- break;
- }
- }
-@@ -852,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
+@@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
static DEFINE_SPINLOCK(kobj_ns_type_lock);
@@ -81178,18 +81123,10 @@ index c6e4dd3..1f41988 100644
/* keep elevated page count for bad page */
return ret;
diff --git a/mm/memory.c b/mm/memory.c
-index bb1369f..38014f5 100644
+index f8b734a..38014f5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -212,6 +212,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
- tlb->mm = mm;
-
- tlb->fullmm = fullmm;
-+ tlb->need_flush_all = 0;
- tlb->start = -1UL;
- tlb->end = 0;
- tlb->need_flush = 0;
-@@ -433,6 +434,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+@@ -434,6 +434,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
free_pte_range(tlb, pmd, addr);
} while (pmd++, addr = next, addr != end);
@@ -81197,7 +81134,7 @@ index bb1369f..38014f5 100644
start &= PUD_MASK;
if (start < floor)
return;
-@@ -447,6 +449,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+@@ -448,6 +449,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
@@ -81206,7 +81143,7 @@ index bb1369f..38014f5 100644
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-@@ -466,6 +470,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -467,6 +470,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
free_pmd_range(tlb, pud, addr, next, floor, ceiling);
} while (pud++, addr = next, addr != end);
@@ -81214,7 +81151,7 @@ index bb1369f..38014f5 100644
start &= PGDIR_MASK;
if (start < floor)
return;
-@@ -480,6 +485,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -481,6 +485,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud, start);
@@ -81223,7 +81160,7 @@ index bb1369f..38014f5 100644
}
/*
-@@ -1618,12 +1625,6 @@ no_page_table:
+@@ -1619,12 +1625,6 @@ no_page_table:
return page;
}
@@ -81236,7 +81173,7 @@ index bb1369f..38014f5 100644
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
-@@ -1709,10 +1710,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1710,10 +1710,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
i = 0;
@@ -81249,7 +81186,7 @@ index bb1369f..38014f5 100644
if (!vma && in_gate_area(mm, start)) {
unsigned long pg = start & PAGE_MASK;
pgd_t *pgd;
-@@ -1760,7 +1761,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1761,7 +1761,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto next_page;
}
@@ -81258,7 +81195,7 @@ index bb1369f..38014f5 100644
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-@@ -1787,11 +1788,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1788,11 +1788,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ret;
unsigned int fault_flags = 0;
@@ -81270,7 +81207,7 @@ index bb1369f..38014f5 100644
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
-@@ -1865,7 +1861,7 @@ next_page:
+@@ -1866,7 +1861,7 @@ next_page:
start += PAGE_SIZE;
nr_pages--;
} while (nr_pages && start < vma->vm_end);
@@ -81279,7 +81216,7 @@ index bb1369f..38014f5 100644
return i;
}
EXPORT_SYMBOL(__get_user_pages);
-@@ -2072,6 +2068,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2073,6 +2068,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -81290,7 +81227,7 @@ index bb1369f..38014f5 100644
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
-@@ -2116,9 +2116,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2117,9 +2116,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -81312,7 +81249,7 @@ index bb1369f..38014f5 100644
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
-@@ -2201,6 +2213,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -2202,6 +2213,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -81320,7 +81257,7 @@ index bb1369f..38014f5 100644
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
-@@ -2401,7 +2414,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -2402,7 +2414,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
@@ -81331,7 +81268,7 @@ index bb1369f..38014f5 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -2421,7 +2436,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -2422,7 +2436,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
@@ -81342,7 +81279,7 @@ index bb1369f..38014f5 100644
if (!pud)
return -ENOMEM;
do {
-@@ -2509,6 +2526,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+@@ -2510,6 +2526,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
copy_user_highpage(dst, src, va, vma);
}
@@ -81529,7 +81466,7 @@ index bb1369f..38014f5 100644
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2725,6 +2922,12 @@ gotten:
+@@ -2726,6 +2922,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -81542,7 +81479,7 @@ index bb1369f..38014f5 100644
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2776,6 +2979,10 @@ gotten:
+@@ -2777,6 +2979,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -81553,7 +81490,7 @@ index bb1369f..38014f5 100644
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -3051,6 +3258,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3052,6 +3258,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -81565,7 +81502,7 @@ index bb1369f..38014f5 100644
unlock_page(page);
if (swapcache) {
/*
-@@ -3074,6 +3286,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3075,6 +3286,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -81577,7 +81514,7 @@ index bb1369f..38014f5 100644
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -3093,40 +3310,6 @@ out_release:
+@@ -3094,40 +3310,6 @@ out_release:
}
/*
@@ -81618,7 +81555,7 @@ index bb1369f..38014f5 100644
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3135,27 +3318,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3136,27 +3318,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -81651,7 +81588,7 @@ index bb1369f..38014f5 100644
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3174,6 +3353,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3175,6 +3353,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
@@ -81663,7 +81600,7 @@ index bb1369f..38014f5 100644
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -3181,6 +3365,12 @@ setpte:
+@@ -3182,6 +3365,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -81676,7 +81613,7 @@ index bb1369f..38014f5 100644
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -3324,6 +3514,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3325,6 +3514,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*/
/* Only go through if we didn't race with anybody else... */
if (likely(pte_same(*page_table, orig_pte))) {
@@ -81689,7 +81626,7 @@ index bb1369f..38014f5 100644
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
-@@ -3343,6 +3539,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3344,6 +3539,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, page_table);
@@ -81704,7 +81641,7 @@ index bb1369f..38014f5 100644
} else {
if (cow_page)
mem_cgroup_uncharge_page(cow_page);
-@@ -3664,6 +3868,12 @@ int handle_pte_fault(struct mm_struct *mm,
+@@ -3665,6 +3868,12 @@ int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
@@ -81717,7 +81654,7 @@ index bb1369f..38014f5 100644
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3680,6 +3890,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3681,6 +3890,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;
@@ -81728,7 +81665,7 @@ index bb1369f..38014f5 100644
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
-@@ -3691,6 +3905,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3692,6 +3905,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
@@ -81763,7 +81700,7 @@ index bb1369f..38014f5 100644
retry:
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
-@@ -3789,6 +4031,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3790,6 +4031,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -81787,7 +81724,7 @@ index bb1369f..38014f5 100644
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3819,11 +4078,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3820,11 +4078,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -81825,7 +81762,7 @@ index bb1369f..38014f5 100644
struct vm_area_struct * vma;
vma = find_vma(current->mm, addr);
-@@ -3856,7 +4139,7 @@ static int __init gate_vma_init(void)
+@@ -3857,7 +4139,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -81834,7 +81771,7 @@ index bb1369f..38014f5 100644
return 0;
}
-@@ -3990,8 +4273,8 @@ out:
+@@ -3991,8 +4273,8 @@ out:
return ret;
}
@@ -81845,7 +81782,7 @@ index bb1369f..38014f5 100644
{
resource_size_t phys_addr;
unsigned long prot = 0;
-@@ -4016,8 +4299,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+@@ -4017,8 +4299,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
@@ -81856,7 +81793,7 @@ index bb1369f..38014f5 100644
{
struct vm_area_struct *vma;
void *old_buf = buf;
-@@ -4025,7 +4308,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4026,7 +4308,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
@@ -81865,7 +81802,7 @@ index bb1369f..38014f5 100644
void *maddr;
struct page *page = NULL;
-@@ -4084,8 +4367,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4085,8 +4367,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*
* The caller must hold a reference on @mm.
*/
@@ -81876,7 +81813,7 @@ index bb1369f..38014f5 100644
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
-@@ -4095,11 +4378,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -4096,11 +4378,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/