author     Natanael Copa <ncopa@alpinelinux.org>    2010-07-22 06:32:51 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>    2010-07-22 06:32:51 +0000
commit     11efe23224daa9b19d9470f0661b2c3bab68728f (patch)
tree       8b6a6ff62d7a9ca48835fe30bf8e57ad63af249d /main/linux-grsec
parent     75daeb0c974ac5b8ee4264ab863c1d14f3c9a794 (diff)
download   aports-11efe23224daa9b19d9470f0661b2c3bab68728f.tar.bz2
           aports-11efe23224daa9b19d9470f0661b2c3bab68728f.tar.xz
main/linux-grsec: upgrade to 2.2.0-2.6.32.16-201007162107
Diffstat (limited to 'main/linux-grsec')
-rw-r--r--  main/linux-grsec/APKBUILD                                        |   6
-rw-r--r--  main/linux-grsec/grsecurity-2.2.0-2.6.32.16-201007162107.patch   | 448
            (renamed from main/linux-grsec/grsecurity-2.2.0-2.6.32.16-201007112149.patch)
2 files changed, 356 insertions, 98 deletions
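The aports side of this commit is confined to the APKBUILD: pkgrel is bumped from 3 to 4, and the source= and md5sums= entries are switched from the 201007112149 grsecurity patch to the 201007162107 one. A minimal sketch of how such an update is typically prepared in an aports checkout, assuming the standard abuild workflow (the download URL and exact commands below are illustrative, not part of this commit):

    cd main/linux-grsec
    # drop the old grsecurity patch and fetch the new one
    # (hypothetical URL; grsecurity test patches were published on grsecurity.net)
    rm grsecurity-2.2.0-2.6.32.16-201007112149.patch
    wget http://grsecurity.net/test/grsecurity-2.2.0-2.6.32.16-201007162107.patch
    # edit APKBUILD: bump pkgrel and point source= at the new patch file,
    # then regenerate the md5sums= block from the files on disk
    abuild checksum
    # rebuild the kernel package to verify the new patch still applies
    abuild -r

The bulk of the diff that follows is the renamed grsecurity patch itself, recorded verbatim in the repository.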
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD index 07993f9654..d3fca9fad6 100644 --- a/main/linux-grsec/APKBUILD +++ b/main/linux-grsec/APKBUILD @@ -4,7 +4,7 @@ _flavor=grsec pkgname=linux-${_flavor} pkgver=2.6.32.16 _kernver=2.6.32 -pkgrel=3 +pkgrel=4 pkgdesc="Linux kernel with grsecurity" url=http://grsecurity.net depends="mkinitfs linux-firmware" @@ -14,7 +14,7 @@ _config=${config:-kernelconfig.${CARCH:-x86}} install= source="ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-$_kernver.tar.bz2 ftp://ftp.kernel.org/pub/linux/kernel/v2.6/patch-$pkgver.bz2 - grsecurity-2.2.0-2.6.32.16-201007112149.patch + grsecurity-2.2.0-2.6.32.16-201007162107.patch 0001-grsec-revert-conflicting-flow-cache-changes.patch 0002-gre-fix-hard-header-destination-address-checking.patch 0003-ip_gre-include-route-header_len-in-max_headroom-calc.patch @@ -149,7 +149,7 @@ firmware() { md5sums="260551284ac224c3a43c4adac7df4879 linux-2.6.32.tar.bz2 744890f9651962ceae7663d44b19df65 patch-2.6.32.16.bz2 -34e3f7fe893034d29530ec1bd5584a33 grsecurity-2.2.0-2.6.32.16-201007112149.patch +feb09c4053225d0d13272af504075636 grsecurity-2.2.0-2.6.32.16-201007162107.patch 1d247140abec49b96250aec9aa59b324 0001-grsec-revert-conflicting-flow-cache-changes.patch 437317f88ec13ace8d39c31983a41696 0002-gre-fix-hard-header-destination-address-checking.patch 151b29a161178ed39d62a08f21f3484d 0003-ip_gre-include-route-header_len-in-max_headroom-calc.patch diff --git a/main/linux-grsec/grsecurity-2.2.0-2.6.32.16-201007112149.patch b/main/linux-grsec/grsecurity-2.2.0-2.6.32.16-201007162107.patch index e9f38fe06a..e74253e175 100644 --- a/main/linux-grsec/grsecurity-2.2.0-2.6.32.16-201007112149.patch +++ b/main/linux-grsec/grsecurity-2.2.0-2.6.32.16-201007162107.patch @@ -8137,7 +8137,7 @@ diff -urNp linux-2.6.32.16/arch/x86/include/asm/pgtable.h linux-2.6.32.16/arch/x #endif /* __ASSEMBLY__ */ diff -urNp linux-2.6.32.16/arch/x86/include/asm/pgtable_types.h linux-2.6.32.16/arch/x86/include/asm/pgtable_types.h --- linux-2.6.32.16/arch/x86/include/asm/pgtable_types.h 2010-07-05 14:14:00.000000000 -0400 -+++ linux-2.6.32.16/arch/x86/include/asm/pgtable_types.h 2010-07-09 14:50:35.000000000 -0400 ++++ linux-2.6.32.16/arch/x86/include/asm/pgtable_types.h 2010-07-16 21:06:43.000000000 -0400 @@ -16,12 +16,11 @@ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ #define _PAGE_BIT_PAT 7 /* on 4KB pages */ @@ -8205,7 +8205,43 @@ diff -urNp linux-2.6.32.16/arch/x86/include/asm/pgtable_types.h linux-2.6.32.16/ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ #endif -@@ -278,7 +281,16 @@ typedef struct page *pgtable_t; +@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p + { + return native_pgd_val(pgd) & PTE_FLAGS_MASK; + } ++#endif + ++#if PAGETABLE_LEVELS == 3 ++#include <asm-generic/pgtable-nopud.h> ++#endif ++ ++#if PAGETABLE_LEVELS == 2 ++#include <asm-generic/pgtable-nopmd.h> ++#endif ++ ++#ifndef __ASSEMBLY__ + #if PAGETABLE_LEVELS > 3 + typedef struct { pudval_t pud; } pud_t; + +@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu + return pud.pud; + } + #else +-#include <asm-generic/pgtable-nopud.h> +- + static inline pudval_t native_pud_val(pud_t pud) + { + return native_pgd_val(pud.pgd); +@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm + return pmd.pmd; + } + #else +-#include <asm-generic/pgtable-nopmd.h> +- + static inline pmdval_t native_pmd_val(pmd_t pmd) + { + return native_pgd_val(pmd.pud.pgd); +@@ -278,7 +287,16 @@ typedef struct page *pgtable_t; extern pteval_t 
__supported_pte_mask; extern void set_nx(void); @@ -15827,7 +15863,7 @@ diff -urNp linux-2.6.32.16/arch/x86/lib/mmx_32.c linux-2.6.32.16/arch/x86/lib/mm to += 64; diff -urNp linux-2.6.32.16/arch/x86/lib/putuser.S linux-2.6.32.16/arch/x86/lib/putuser.S --- linux-2.6.32.16/arch/x86/lib/putuser.S 2010-07-05 14:14:00.000000000 -0400 -+++ linux-2.6.32.16/arch/x86/lib/putuser.S 2010-07-11 20:00:44.000000000 -0400 ++++ linux-2.6.32.16/arch/x86/lib/putuser.S 2010-07-16 21:06:43.000000000 -0400 @@ -15,7 +15,8 @@ #include <asm/thread_info.h> #include <asm/errno.h> @@ -15838,7 +15874,7 @@ diff -urNp linux-2.6.32.16/arch/x86/lib/putuser.S linux-2.6.32.16/arch/x86/lib/p /* * __put_user_X -@@ -29,59 +30,156 @@ +@@ -29,59 +30,162 @@ * as they get called from within inline assembly. */ @@ -15848,6 +15884,12 @@ diff -urNp linux-2.6.32.16/arch/x86/lib/putuser.S linux-2.6.32.16/arch/x86/lib/p #define EXIT ret ; \ CFI_ENDPROC ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define _DEST %_ASM_CX,%_ASM_BX ++#else ++#define _DEST %_ASM_CX ++#endif ++ .text ENTRY(__put_user_1) ENTER @@ -15859,18 +15901,19 @@ diff -urNp linux-2.6.32.16/arch/x86/lib/putuser.S linux-2.6.32.16/arch/x86/lib/p + GET_THREAD_INFO(%_ASM_BX) cmp TI_addr_limit(%_ASM_BX),%_ASM_CX jae bad_put_user +-1: movb %al,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX -+ jae 1234f -+ add %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx +1234: +#endif + +#endif + - 1: movb %al,(%_ASM_CX) ++1: movb %al,(_DEST) + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl %ss @@ -15893,18 +15936,19 @@ diff -urNp linux-2.6.32.16/arch/x86/lib/putuser.S linux-2.6.32.16/arch/x86/lib/p sub $1,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user +-2: movw %ax,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX -+ jae 1234f -+ add %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx +1234: +#endif + +#endif + - 2: movw %ax,(%_ASM_CX) ++2: movw %ax,(_DEST) + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl %ss @@ -15927,18 +15971,19 @@ diff -urNp linux-2.6.32.16/arch/x86/lib/putuser.S linux-2.6.32.16/arch/x86/lib/p sub $3,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user +-3: movl %eax,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX -+ jae 1234f -+ add %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx +1234: +#endif + +#endif + - 3: movl %eax,(%_ASM_CX) ++3: movl %eax,(_DEST) + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl %ss @@ -15961,20 +16006,22 @@ diff -urNp linux-2.6.32.16/arch/x86/lib/putuser.S linux-2.6.32.16/arch/x86/lib/p sub $7,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user +-4: mov %_ASM_AX,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX -+ jae 1234f -+ add %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx +1234: +#endif + +#endif + - 4: mov %_ASM_AX,(%_ASM_CX) ++4: mov %_ASM_AX,(_DEST) #ifdef CONFIG_X86_32 - 5: movl %edx,4(%_ASM_CX) +-5: movl %edx,4(%_ASM_CX) ++5: movl %edx,4(_DEST) #endif + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) @@ -18688,7 +18735,7 @@ diff -urNp linux-2.6.32.16/arch/x86/mm/pageattr-test.c linux-2.6.32.16/arch/x86/ struct split_state { diff -urNp linux-2.6.32.16/arch/x86/mm/pat.c 
linux-2.6.32.16/arch/x86/mm/pat.c --- linux-2.6.32.16/arch/x86/mm/pat.c 2010-07-05 14:14:00.000000000 -0400 -+++ linux-2.6.32.16/arch/x86/mm/pat.c 2010-07-09 14:50:35.000000000 -0400 ++++ linux-2.6.32.16/arch/x86/mm/pat.c 2010-07-16 21:06:43.000000000 -0400 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct conflict: @@ -18707,6 +18754,17 @@ diff -urNp linux-2.6.32.16/arch/x86/mm/pat.c linux-2.6.32.16/arch/x86/mm/pat.c } dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end); +@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig + while (cursor < to) { + if (!devmem_is_allowed(pfn)) { + printk(KERN_INFO +- "Program %s tried to access /dev/mem between %Lx->%Lx.\n", +- current->comm, from, to); ++ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n", ++ current->comm, from, to, cursor); + return 0; + } + cursor += PAGE_SIZE; @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un printk(KERN_INFO "%s:%d ioremap_change_attr failed %s " @@ -20136,7 +20194,7 @@ diff -urNp linux-2.6.32.16/crypto/lrw.c linux-2.6.32.16/crypto/lrw.c crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); diff -urNp linux-2.6.32.16/Documentation/dontdiff linux-2.6.32.16/Documentation/dontdiff --- linux-2.6.32.16/Documentation/dontdiff 2010-07-05 14:14:00.000000000 -0400 -+++ linux-2.6.32.16/Documentation/dontdiff 2010-07-09 14:50:35.000000000 -0400 ++++ linux-2.6.32.16/Documentation/dontdiff 2010-07-16 21:06:43.000000000 -0400 @@ -3,6 +3,7 @@ *.bin *.cpio @@ -20145,15 +20203,18 @@ diff -urNp linux-2.6.32.16/Documentation/dontdiff linux-2.6.32.16/Documentation/ *.dsp *.dvi *.elf -@@ -40,6 +41,7 @@ +@@ -38,8 +39,10 @@ + *.tab.h + *.tex *.ver ++*.vim *.xml *_MODULES +*_reg_safe.h *_vga16.c *~ *.9 -@@ -49,11 +51,16 @@ +@@ -49,11 +52,16 @@ 53c700_d.h CVS ChangeSet @@ -20170,7 +20231,7 @@ diff -urNp linux-2.6.32.16/Documentation/dontdiff linux-2.6.32.16/Documentation/ SCCS System.map* TAGS -@@ -76,7 +83,10 @@ btfixupprep +@@ -76,7 +84,10 @@ btfixupprep build bvmlinux bzImage* @@ -20181,7 +20242,7 @@ diff -urNp linux-2.6.32.16/Documentation/dontdiff linux-2.6.32.16/Documentation/ comp*.log compile.h* conf -@@ -103,13 +113,14 @@ gen_crc32table +@@ -103,13 +114,14 @@ gen_crc32table gen_init_cpio genksyms *_gray256.c @@ -20197,7 +20258,7 @@ diff -urNp linux-2.6.32.16/Documentation/dontdiff linux-2.6.32.16/Documentation/ keywords.c ksym.c* ksym.h* -@@ -133,7 +144,9 @@ mkboot +@@ -133,7 +145,9 @@ mkboot mkbugboot mkcpustr mkdep @@ -20207,7 +20268,7 @@ diff -urNp linux-2.6.32.16/Documentation/dontdiff linux-2.6.32.16/Documentation/ mktables mktree modpost -@@ -149,6 +162,7 @@ patches* +@@ -149,6 +163,7 @@ patches* pca200e.bin pca200e_ecd.bin2 piggy.gz @@ -20215,7 +20276,14 @@ diff -urNp linux-2.6.32.16/Documentation/dontdiff linux-2.6.32.16/Documentation/ piggyback pnmtologo ppc_defs.h* -@@ -163,6 +177,7 @@ setup +@@ -157,12 +172,14 @@ qconf + raid6altivec*.c + raid6int*.c + raid6tables.c ++regdb.c + relocs + series + setup setup.bin setup.elf sImage @@ -20223,7 +20291,7 @@ diff -urNp linux-2.6.32.16/Documentation/dontdiff linux-2.6.32.16/Documentation/ sm_tbl* split-include syscalltab.h -@@ -186,14 +201,20 @@ version.h* +@@ -186,14 +203,20 @@ version.h* vmlinux vmlinux-* vmlinux.aout @@ -44351,6 +44419,78 @@ diff -urNp linux-2.6.32.16/include/asm-generic/pgtable.h linux-2.6.32.16/include #endif /* !__ASSEMBLY__ */ #endif /* _ASM_GENERIC_PGTABLE_H */ +diff -urNp linux-2.6.32.16/include/asm-generic/pgtable-nopmd.h linux-2.6.32.16/include/asm-generic/pgtable-nopmd.h +--- 
linux-2.6.32.16/include/asm-generic/pgtable-nopmd.h 2010-07-05 14:14:00.000000000 -0400 ++++ linux-2.6.32.16/include/asm-generic/pgtable-nopmd.h 2010-07-16 21:06:43.000000000 -0400 +@@ -1,14 +1,19 @@ + #ifndef _PGTABLE_NOPMD_H + #define _PGTABLE_NOPMD_H + +-#ifndef __ASSEMBLY__ +- + #include <asm-generic/pgtable-nopud.h> + +-struct mm_struct; +- + #define __PAGETABLE_PMD_FOLDED + ++#define PMD_SHIFT PUD_SHIFT ++#define PTRS_PER_PMD 1 ++#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) ++#define PMD_MASK (~(PMD_SIZE-1)) ++ ++#ifndef __ASSEMBLY__ ++ ++struct mm_struct; ++ + /* + * Having the pmd type consist of a pud gets the size right, and allows + * us to conceptually access the pud entry that this pmd is folded into +@@ -16,11 +21,6 @@ struct mm_struct; + */ + typedef struct { pud_t pud; } pmd_t; + +-#define PMD_SHIFT PUD_SHIFT +-#define PTRS_PER_PMD 1 +-#define PMD_SIZE (1UL << PMD_SHIFT) +-#define PMD_MASK (~(PMD_SIZE-1)) +- + /* + * The "pud_xxx()" functions here are trivial for a folded two-level + * setup: the pmd is never bad, and a pmd always exists (as it's folded +diff -urNp linux-2.6.32.16/include/asm-generic/pgtable-nopud.h linux-2.6.32.16/include/asm-generic/pgtable-nopud.h +--- linux-2.6.32.16/include/asm-generic/pgtable-nopud.h 2010-07-05 14:14:00.000000000 -0400 ++++ linux-2.6.32.16/include/asm-generic/pgtable-nopud.h 2010-07-16 21:06:43.000000000 -0400 +@@ -1,10 +1,15 @@ + #ifndef _PGTABLE_NOPUD_H + #define _PGTABLE_NOPUD_H + +-#ifndef __ASSEMBLY__ +- + #define __PAGETABLE_PUD_FOLDED + ++#define PUD_SHIFT PGDIR_SHIFT ++#define PTRS_PER_PUD 1 ++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT) ++#define PUD_MASK (~(PUD_SIZE-1)) ++ ++#ifndef __ASSEMBLY__ ++ + /* + * Having the pud type consist of a pgd gets the size right, and allows + * us to conceptually access the pgd entry that this pud is folded into +@@ -12,11 +17,6 @@ + */ + typedef struct { pgd_t pgd; } pud_t; + +-#define PUD_SHIFT PGDIR_SHIFT +-#define PTRS_PER_PUD 1 +-#define PUD_SIZE (1UL << PUD_SHIFT) +-#define PUD_MASK (~(PUD_SIZE-1)) +- + /* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pud is never bad, and a pud always exists (as it's folded diff -urNp linux-2.6.32.16/include/asm-generic/vmlinux.lds.h linux-2.6.32.16/include/asm-generic/vmlinux.lds.h --- linux-2.6.32.16/include/asm-generic/vmlinux.lds.h 2010-07-05 14:14:00.000000000 -0400 +++ linux-2.6.32.16/include/asm-generic/vmlinux.lds.h 2010-07-09 14:50:38.000000000 -0400 @@ -46493,6 +46633,18 @@ diff -urNp linux-2.6.32.16/include/linux/mmu_notifier.h linux-2.6.32.16/include/ }) #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ +diff -urNp linux-2.6.32.16/include/linux/mmzone.h linux-2.6.32.16/include/linux/mmzone.h +--- linux-2.6.32.16/include/linux/mmzone.h 2010-07-05 14:14:00.000000000 -0400 ++++ linux-2.6.32.16/include/linux/mmzone.h 2010-07-16 21:06:43.000000000 -0400 +@@ -343,7 +343,7 @@ struct zone { + unsigned long flags; /* zone flags, see below */ + + /* Zone statistics */ +- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + + /* + * prev_priority holds the scanning priority for this zone. 
It is diff -urNp linux-2.6.32.16/include/linux/mod_devicetable.h linux-2.6.32.16/include/linux/mod_devicetable.h --- linux-2.6.32.16/include/linux/mod_devicetable.h 2010-07-05 14:14:00.000000000 -0400 +++ linux-2.6.32.16/include/linux/mod_devicetable.h 2010-07-09 14:50:38.000000000 -0400 @@ -47502,6 +47654,63 @@ diff -urNp linux-2.6.32.16/include/linux/vmalloc.h linux-2.6.32.16/include/linux +}) + #endif /* _LINUX_VMALLOC_H */ +diff -urNp linux-2.6.32.16/include/linux/vmstat.h linux-2.6.32.16/include/linux/vmstat.h +--- linux-2.6.32.16/include/linux/vmstat.h 2010-07-05 14:14:00.000000000 -0400 ++++ linux-2.6.32.16/include/linux/vmstat.h 2010-07-16 21:06:43.000000000 -0400 +@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in + /* + * Zone based page accounting with per cpu differentials. + */ +-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + + static inline void zone_page_state_add(long x, struct zone *zone, + enum zone_stat_item item) + { +- atomic_long_add(x, &zone->vm_stat[item]); +- atomic_long_add(x, &vm_stat[item]); ++ atomic_long_add_unchecked(x, &zone->vm_stat[item]); ++ atomic_long_add_unchecked(x, &vm_stat[item]); + } + + static inline unsigned long global_page_state(enum zone_stat_item item) + { +- long x = atomic_long_read(&vm_stat[item]); ++ long x = atomic_long_read_unchecked(&vm_stat[item]); + #ifdef CONFIG_SMP + if (x < 0) + x = 0; +@@ -158,7 +158,7 @@ static inline unsigned long global_page_ + static inline unsigned long zone_page_state(struct zone *zone, + enum zone_stat_item item) + { +- long x = atomic_long_read(&zone->vm_stat[item]); ++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]); + #ifdef CONFIG_SMP + if (x < 0) + x = 0; +@@ -242,8 +242,8 @@ static inline void __mod_zone_page_state + + static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) + { +- atomic_long_inc(&zone->vm_stat[item]); +- atomic_long_inc(&vm_stat[item]); ++ atomic_long_inc_unchecked(&zone->vm_stat[item]); ++ atomic_long_inc_unchecked(&vm_stat[item]); + } + + static inline void __inc_zone_page_state(struct page *page, +@@ -254,8 +254,8 @@ static inline void __inc_zone_page_state + + static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) + { +- atomic_long_dec(&zone->vm_stat[item]); +- atomic_long_dec(&vm_stat[item]); ++ atomic_long_dec_unchecked(&zone->vm_stat[item]); ++ atomic_long_dec_unchecked(&vm_stat[item]); + } + + static inline void __dec_zone_page_state(struct page *page, diff -urNp linux-2.6.32.16/include/net/irda/ircomm_tty.h linux-2.6.32.16/include/net/irda/ircomm_tty.h --- linux-2.6.32.16/include/net/irda/ircomm_tty.h 2010-07-05 14:14:00.000000000 -0400 +++ linux-2.6.32.16/include/net/irda/ircomm_tty.h 2010-07-09 14:50:38.000000000 -0400 @@ -51578,7 +51787,7 @@ diff -urNp linux-2.6.32.16/mm/mlock.c linux-2.6.32.16/mm/mlock.c ret = do_mlockall(flags); diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c --- linux-2.6.32.16/mm/mmap.c 2010-07-05 14:14:00.000000000 -0400 -+++ linux-2.6.32.16/mm/mmap.c 2010-07-09 14:50:38.000000000 -0400 ++++ linux-2.6.32.16/mm/mmap.c 2010-07-16 21:06:43.000000000 -0400 @@ -45,6 +45,16 @@ #define arch_rebalance_pgtables(addr, len) (addr) #endif @@ -51793,22 +52002,18 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c if (addr & ~PAGE_MASK) return addr; -@@ -959,6 +1035,26 @@ unsigned long do_mmap_pgoff(struct file +@@ -959,6 +1035,22 @@ unsigned long do_mmap_pgoff(struct file vm_flags = 
calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; -+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) -+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { -+ +#ifdef CONFIG_PAX_MPROTECT -+ if (mm->pax_flags & MF_PAX_MPROTECT) { -+ if ((prot & (PROT_WRITE | PROT_EXEC)) != PROT_EXEC) -+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC); -+ else -+ vm_flags &= ~(VM_WRITE | VM_MAYWRITE); -+ } -+#endif -+ ++ if (mm->pax_flags & MF_PAX_MPROTECT) { ++ if ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC)) ++ return -EPERM; ++ if (!(prot & PROT_EXEC)) ++ vm_flags &= ~VM_MAYEXEC; ++ else ++ vm_flags &= ~VM_MAYWRITE; + } +#endif + @@ -51820,7 +52025,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; -@@ -970,6 +1066,7 @@ unsigned long do_mmap_pgoff(struct file +@@ -970,6 +1062,7 @@ unsigned long do_mmap_pgoff(struct file locked += mm->locked_vm; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; lock_limit >>= PAGE_SHIFT; @@ -51828,7 +52033,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; } -@@ -1043,6 +1140,9 @@ unsigned long do_mmap_pgoff(struct file +@@ -1043,6 +1136,9 @@ unsigned long do_mmap_pgoff(struct file if (error) return error; @@ -51838,7 +52043,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c return mmap_region(file, addr, len, flags, vm_flags, pgoff); } EXPORT_SYMBOL(do_mmap_pgoff); -@@ -1055,10 +1155,10 @@ EXPORT_SYMBOL(do_mmap_pgoff); +@@ -1055,10 +1151,10 @@ EXPORT_SYMBOL(do_mmap_pgoff); */ int vma_wants_writenotify(struct vm_area_struct *vma) { @@ -51851,7 +52056,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c return 0; /* The backer wishes to know when pages are first written to? */ -@@ -1107,14 +1207,24 @@ unsigned long mmap_region(struct file *f +@@ -1107,14 +1203,24 @@ unsigned long mmap_region(struct file *f unsigned long charged = 0; struct inode *inode = file ? file->f_path.dentry->d_inode : NULL; @@ -51878,7 +52083,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c } /* Check against address space limit. 
*/ -@@ -1163,6 +1273,16 @@ munmap_back: +@@ -1163,6 +1269,16 @@ munmap_back: goto unacct_error; } @@ -51895,7 +52100,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; -@@ -1185,6 +1305,19 @@ munmap_back: +@@ -1185,6 +1301,19 @@ munmap_back: error = file->f_op->mmap(file, vma); if (error) goto unmap_and_free_vma; @@ -51915,7 +52120,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c if (vm_flags & VM_EXECUTABLE) added_exe_file_vma(mm); -@@ -1208,6 +1341,11 @@ munmap_back: +@@ -1208,6 +1337,11 @@ munmap_back: vma_link(mm, vma, prev, rb_link, rb_parent); file = vma->vm_file; @@ -51927,7 +52132,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c /* Once vma denies write, undo our temporary denial count */ if (correct_wcount) atomic_inc(&inode->i_writecount); -@@ -1216,6 +1354,7 @@ out: +@@ -1216,6 +1350,7 @@ out: mm->total_vm += len >> PAGE_SHIFT; vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); @@ -51935,7 +52140,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c if (vm_flags & VM_LOCKED) { /* * makes pages present; downgrades, drops, reacquires mmap_sem -@@ -1238,6 +1377,12 @@ unmap_and_free_vma: +@@ -1238,6 +1373,12 @@ unmap_and_free_vma: unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); charged = 0; free_vma: @@ -51948,7 +52153,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c kmem_cache_free(vm_area_cachep, vma); unacct_error: if (charged) -@@ -1271,6 +1416,10 @@ arch_get_unmapped_area(struct file *filp +@@ -1271,6 +1412,10 @@ arch_get_unmapped_area(struct file *filp if (flags & MAP_FIXED) return addr; @@ -51959,7 +52164,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); -@@ -1279,10 +1428,10 @@ arch_get_unmapped_area(struct file *filp +@@ -1279,10 +1424,10 @@ arch_get_unmapped_area(struct file *filp return addr; } if (len > mm->cached_hole_size) { @@ -51973,7 +52178,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c } full_search: -@@ -1293,9 +1442,8 @@ full_search: +@@ -1293,9 +1438,8 @@ full_search: * Start a new search - just in case we missed * some holes. */ @@ -51985,7 +52190,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c mm->cached_hole_size = 0; goto full_search; } -@@ -1317,10 +1465,16 @@ full_search: +@@ -1317,10 +1461,16 @@ full_search: void arch_unmap_area(struct mm_struct *mm, unsigned long addr) { @@ -52003,7 +52208,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c mm->free_area_cache = addr; mm->cached_hole_size = ~0UL; } -@@ -1338,7 +1492,7 @@ arch_get_unmapped_area_topdown(struct fi +@@ -1338,7 +1488,7 @@ arch_get_unmapped_area_topdown(struct fi { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; @@ -52012,7 +52217,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c /* requested length too big for entire address space */ if (len > TASK_SIZE) -@@ -1347,6 +1501,10 @@ arch_get_unmapped_area_topdown(struct fi +@@ -1347,6 +1497,10 @@ arch_get_unmapped_area_topdown(struct fi if (flags & MAP_FIXED) return addr; @@ -52023,7 +52228,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); -@@ -1404,13 +1562,21 @@ bottomup: +@@ -1404,13 +1558,21 @@ bottomup: * can happen with large stack limits and large mmap() * allocations. 
*/ @@ -52047,7 +52252,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c mm->cached_hole_size = ~0UL; return addr; -@@ -1419,6 +1585,12 @@ bottomup: +@@ -1419,6 +1581,12 @@ bottomup: void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) { @@ -52060,7 +52265,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c /* * Is this a new hole at the highest possible address? */ -@@ -1426,8 +1598,10 @@ void arch_unmap_area_topdown(struct mm_s +@@ -1426,8 +1594,10 @@ void arch_unmap_area_topdown(struct mm_s mm->free_area_cache = addr; /* dont allow allocations above current base */ @@ -52072,7 +52277,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c } unsigned long -@@ -1535,6 +1709,27 @@ out: +@@ -1535,6 +1705,27 @@ out: return prev ? prev->vm_next : vma; } @@ -52100,7 +52305,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c /* * Verify that the stack growth is acceptable and * update accounting. This is shared with both the -@@ -1551,6 +1746,7 @@ static int acct_stack_growth(struct vm_a +@@ -1551,6 +1742,7 @@ static int acct_stack_growth(struct vm_a return -ENOMEM; /* Stack limit test */ @@ -52108,7 +52313,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c if (size > rlim[RLIMIT_STACK].rlim_cur) return -ENOMEM; -@@ -1560,6 +1756,7 @@ static int acct_stack_growth(struct vm_a +@@ -1560,6 +1752,7 @@ static int acct_stack_growth(struct vm_a unsigned long limit; locked = mm->locked_vm + grow; limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; @@ -52116,7 +52321,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c if (locked > limit && !capable(CAP_IPC_LOCK)) return -ENOMEM; } -@@ -1595,35 +1792,40 @@ static +@@ -1595,35 +1788,40 @@ static #endif int expand_upwards(struct vm_area_struct *vma, unsigned long address) { @@ -52167,7 +52372,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c unsigned long size, grow; size = address - vma->vm_start; -@@ -1633,6 +1835,8 @@ int expand_upwards(struct vm_area_struct +@@ -1633,6 +1831,8 @@ int expand_upwards(struct vm_area_struct if (!error) vma->vm_end = address; } @@ -52176,7 +52381,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c anon_vma_unlock(vma); return error; } -@@ -1644,7 +1848,8 @@ int expand_upwards(struct vm_area_struct +@@ -1644,7 +1844,8 @@ int expand_upwards(struct vm_area_struct static int expand_downwards(struct vm_area_struct *vma, unsigned long address) { @@ -52186,7 +52391,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c /* * We must make sure the anon_vma is allocated -@@ -1658,6 +1863,15 @@ static int expand_downwards(struct vm_ar +@@ -1658,6 +1859,15 @@ static int expand_downwards(struct vm_ar if (error) return error; @@ -52202,7 +52407,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c anon_vma_lock(vma); /* -@@ -1667,9 +1881,15 @@ static int expand_downwards(struct vm_ar +@@ -1667,9 +1877,15 @@ static int expand_downwards(struct vm_ar */ /* Somebody else might have raced and expanded it already */ @@ -52219,7 +52424,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; -@@ -1677,9 +1897,20 @@ static int expand_downwards(struct vm_ar +@@ -1677,9 +1893,20 @@ static int expand_downwards(struct vm_ar if (!error) { vma->vm_start = address; vma->vm_pgoff -= grow; @@ -52240,7 +52445,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c return error; } -@@ 
-1755,6 +1986,13 @@ static void remove_vma_list(struct mm_st +@@ -1755,6 +1982,13 @@ static void remove_vma_list(struct mm_st do { long nrpages = vma_pages(vma); @@ -52254,7 +52459,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c mm->total_vm -= nrpages; vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); vma = remove_vma(vma); -@@ -1799,6 +2037,16 @@ detach_vmas_to_be_unmapped(struct mm_str +@@ -1799,6 +2033,16 @@ detach_vmas_to_be_unmapped(struct mm_str insertion_point = (prev ? &prev->vm_next : &mm->mmap); do { @@ -52271,7 +52476,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c rb_erase(&vma->vm_rb, &mm->mm_rb); mm->map_count--; tail_vma = vma; -@@ -1824,10 +2072,25 @@ int split_vma(struct mm_struct * mm, str +@@ -1824,10 +2068,25 @@ int split_vma(struct mm_struct * mm, str struct mempolicy *pol; struct vm_area_struct *new; @@ -52297,7 +52502,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; -@@ -1835,6 +2098,16 @@ int split_vma(struct mm_struct * mm, str +@@ -1835,6 +2094,16 @@ int split_vma(struct mm_struct * mm, str if (!new) return -ENOMEM; @@ -52314,7 +52519,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c /* most fields are the same, copy all, and then fixup */ *new = *vma; -@@ -1845,8 +2118,29 @@ int split_vma(struct mm_struct * mm, str +@@ -1845,8 +2114,29 @@ int split_vma(struct mm_struct * mm, str new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); } @@ -52344,7 +52549,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c kmem_cache_free(vm_area_cachep, new); return PTR_ERR(pol); } -@@ -1867,6 +2161,28 @@ int split_vma(struct mm_struct * mm, str +@@ -1867,6 +2157,28 @@ int split_vma(struct mm_struct * mm, str else vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); @@ -52373,13 +52578,13 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c return 0; } -@@ -1875,11 +2191,30 @@ int split_vma(struct mm_struct * mm, str +@@ -1875,11 +2187,30 @@ int split_vma(struct mm_struct * mm, str * work. This now handles partial unmappings. 
* Jeremy Fitzhardinge <jeremy@goop.org> */ +#ifdef CONFIG_PAX_SEGMEXEC - int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) - { ++int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) ++{ + int ret = __do_munmap(mm, start, len); + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC)) + return ret; @@ -52389,9 +52594,9 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c + +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len) +#else -+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) + int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) +#endif -+{ + { unsigned long end; struct vm_area_struct *vma, *prev, *last; @@ -52404,7 +52609,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; -@@ -1943,6 +2278,8 @@ int do_munmap(struct mm_struct *mm, unsi +@@ -1943,6 +2274,8 @@ int do_munmap(struct mm_struct *mm, unsi /* Fix up all other VM information */ remove_vma_list(mm, vma); @@ -52413,7 +52618,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c return 0; } -@@ -1955,22 +2292,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a +@@ -1955,22 +2288,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a profile_munmap(addr); @@ -52442,7 +52647,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c /* * this is really a simplified "do_mmap". it only handles * anonymous maps. eventually we may be able to do some -@@ -1984,6 +2317,7 @@ unsigned long do_brk(unsigned long addr, +@@ -1984,6 +2313,7 @@ unsigned long do_brk(unsigned long addr, struct rb_node ** rb_link, * rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; @@ -52450,7 +52655,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c len = PAGE_ALIGN(len); if (!len) -@@ -1995,16 +2329,30 @@ unsigned long do_brk(unsigned long addr, +@@ -1995,16 +2325,30 @@ unsigned long do_brk(unsigned long addr, flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; @@ -52482,7 +52687,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c locked += mm->locked_vm; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; lock_limit >>= PAGE_SHIFT; -@@ -2021,22 +2369,22 @@ unsigned long do_brk(unsigned long addr, +@@ -2021,22 +2365,22 @@ unsigned long do_brk(unsigned long addr, /* * Clear old maps. this also does some error checking for us */ @@ -52509,7 +52714,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c return -ENOMEM; /* Can we just expand an old private anonymous mapping? */ -@@ -2050,7 +2398,7 @@ unsigned long do_brk(unsigned long addr, +@@ -2050,7 +2394,7 @@ unsigned long do_brk(unsigned long addr, */ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { @@ -52518,7 +52723,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c return -ENOMEM; } -@@ -2062,11 +2410,12 @@ unsigned long do_brk(unsigned long addr, +@@ -2062,11 +2406,12 @@ unsigned long do_brk(unsigned long addr, vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); out: @@ -52533,7 +52738,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c return addr; } -@@ -2113,8 +2462,10 @@ void exit_mmap(struct mm_struct *mm) +@@ -2113,8 +2458,10 @@ void exit_mmap(struct mm_struct *mm) * Walk the list again, actually closing and freeing it, * with preemption enabled, without holding any MM locks. 
*/ @@ -52545,7 +52750,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); } -@@ -2128,6 +2479,10 @@ int insert_vm_struct(struct mm_struct * +@@ -2128,6 +2475,10 @@ int insert_vm_struct(struct mm_struct * struct vm_area_struct * __vma, * prev; struct rb_node ** rb_link, * rb_parent; @@ -52556,7 +52761,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index -@@ -2150,7 +2505,22 @@ int insert_vm_struct(struct mm_struct * +@@ -2150,7 +2501,22 @@ int insert_vm_struct(struct mm_struct * if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; @@ -52579,7 +52784,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c return 0; } -@@ -2168,6 +2538,8 @@ struct vm_area_struct *copy_vma(struct v +@@ -2168,6 +2534,8 @@ struct vm_area_struct *copy_vma(struct v struct rb_node **rb_link, *rb_parent; struct mempolicy *pol; @@ -52588,7 +52793,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. -@@ -2211,6 +2583,35 @@ struct vm_area_struct *copy_vma(struct v +@@ -2211,6 +2579,35 @@ struct vm_area_struct *copy_vma(struct v return new_vma; } @@ -52624,7 +52829,7 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c /* * Return true if the calling process may expand its vm space by the passed * number of pages -@@ -2221,7 +2622,7 @@ int may_expand_vm(struct mm_struct *mm, +@@ -2221,7 +2618,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long lim; lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT; @@ -52633,16 +52838,18 @@ diff -urNp linux-2.6.32.16/mm/mmap.c linux-2.6.32.16/mm/mmap.c if (cur + npages > lim) return 0; return 1; -@@ -2290,6 +2691,15 @@ int install_special_mapping(struct mm_st +@@ -2290,6 +2687,17 @@ int install_special_mapping(struct mm_st vma->vm_start = addr; vma->vm_end = addr + len; +#ifdef CONFIG_PAX_MPROTECT + if (mm->pax_flags & MF_PAX_MPROTECT) { -+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC) -+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC); ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) ++ return -EPERM; ++ if (!(vm_flags & VM_EXEC)) ++ vm_flags &= ~VM_MAYEXEC; + else -+ vm_flags &= ~(VM_WRITE | VM_MAYWRITE); ++ vm_flags &= ~VM_MAYWRITE; + } +#endif + @@ -53927,6 +54134,57 @@ diff -urNp linux-2.6.32.16/mm/vmalloc.c linux-2.6.32.16/mm/vmalloc.c void *vmalloc_32_user(unsigned long size) { struct vm_struct *area; +diff -urNp linux-2.6.32.16/mm/vmstat.c linux-2.6.32.16/mm/vmstat.c +--- linux-2.6.32.16/mm/vmstat.c 2010-07-05 14:14:00.000000000 -0400 ++++ linux-2.6.32.16/mm/vmstat.c 2010-07-16 21:06:43.000000000 -0400 +@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu) + * + * vm_stat contains the global counters + */ +-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + EXPORT_SYMBOL(vm_stat); + + #ifdef CONFIG_SMP +@@ -311,7 +311,7 @@ void refresh_cpu_vm_stats(int cpu) + v = p->vm_stat_diff[i]; + p->vm_stat_diff[i] = 0; + local_irq_restore(flags); +- atomic_long_add(v, &zone->vm_stat[i]); ++ atomic_long_add_unchecked(v, &zone->vm_stat[i]); + global_diff[i] += v; + #ifdef CONFIG_NUMA + /* 3 seconds idle till flush */ +@@ -349,7 +349,7 @@ void refresh_cpu_vm_stats(int cpu) + + for (i 
= 0; i < NR_VM_ZONE_STAT_ITEMS; i++) + if (global_diff[i]) +- atomic_long_add(global_diff[i], &vm_stat[i]); ++ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]); + } + + #endif +@@ -940,10 +940,16 @@ static int __init setup_vmstat(void) + start_cpu_timer(cpu); + #endif + #ifdef CONFIG_PROC_FS +- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); +- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); +- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); +- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); ++ { ++ mode_t gr_mode = S_IRUGO; ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ gr_mode = S_IRUSR; ++#endif ++ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations); ++ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops); ++ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations); ++ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations); ++ } + #endif + return 0; + } diff -urNp linux-2.6.32.16/net/8021q/vlan.c linux-2.6.32.16/net/8021q/vlan.c --- linux-2.6.32.16/net/8021q/vlan.c 2010-07-05 14:14:00.000000000 -0400 +++ linux-2.6.32.16/net/8021q/vlan.c 2010-07-09 14:50:38.000000000 -0400 |