Diffstat (limited to 'main/linux-grsec/grsecurity-2.1.14-2.6.31.7-200912081744.patch')
-rw-r--r--  main/linux-grsec/grsecurity-2.1.14-2.6.31.7-200912081744.patch  53111
1 file changed, 53111 insertions(+), 0 deletions(-)
diff --git a/main/linux-grsec/grsecurity-2.1.14-2.6.31.7-200912081744.patch b/main/linux-grsec/grsecurity-2.1.14-2.6.31.7-200912081744.patch
new file mode 100644
index 0000000000..8e329eee84
--- /dev/null
+++ b/main/linux-grsec/grsecurity-2.1.14-2.6.31.7-200912081744.patch
@@ -0,0 +1,53111 @@
+diff -urNp linux-2.6.31.7/arch/alpha/include/asm/atomic.h linux-2.6.31.7/arch/alpha/include/asm/atomic.h
+--- linux-2.6.31.7/arch/alpha/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/alpha/include/asm/atomic.h 2009-12-08 17:39:42.713637173 -0500
+@@ -18,9 +18,11 @@
+ #define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )
+
+ #define atomic_read(v) ((v)->counter + 0)
++#define atomic_read_unchecked(v) ((v)->counter + 0)
+ #define atomic64_read(v) ((v)->counter + 0)
+
+ #define atomic_set(v,i) ((v)->counter = (i))
++#define atomic_set_unchecked(v,i) ((v)->counter = (i))
+ #define atomic64_set(v,i) ((v)->counter = (i))
+
+ /*
+@@ -44,6 +46,11 @@ static __inline__ void atomic_add(int i,
+ :"Ir" (i), "m" (v->counter));
+ }
+
++static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t * v)
++{
++ atomic_add(i, (atomic_t *)v);
++}
++
+ static __inline__ void atomic64_add(long i, atomic64_t * v)
+ {
+ unsigned long temp;
+@@ -74,6 +81,11 @@ static __inline__ void atomic_sub(int i,
+ :"Ir" (i), "m" (v->counter));
+ }
+
++static __inline__ void atomic_sub_unchecked(int i, atomic_unchecked_t * v)
++{
++ atomic_sub(i, (atomic_t *)v);
++}
++
+ static __inline__ void atomic64_sub(long i, atomic64_t * v)
+ {
+ unsigned long temp;
+@@ -246,6 +258,7 @@ static __inline__ int atomic64_add_unles
+ #define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
+
+ #define atomic_inc(v) atomic_add(1,(v))
++#define atomic_inc_unchecked(v) atomic_add_unchecked(1,(v))
+ #define atomic64_inc(v) atomic64_add(1,(v))
+
+ #define atomic_dec(v) atomic_sub(1,(v))
+diff -urNp linux-2.6.31.7/arch/alpha/include/asm/elf.h linux-2.6.31.7/arch/alpha/include/asm/elf.h
+--- linux-2.6.31.7/arch/alpha/include/asm/elf.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/alpha/include/asm/elf.h 2009-12-08 17:39:42.713637173 -0500
+@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff -urNp linux-2.6.31.7/arch/alpha/include/asm/pgtable.h linux-2.6.31.7/arch/alpha/include/asm/pgtable.h
+--- linux-2.6.31.7/arch/alpha/include/asm/pgtable.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/alpha/include/asm/pgtable.h 2009-12-08 17:39:42.713637173 -0500
+@@ -101,6 +101,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff -urNp linux-2.6.31.7/arch/alpha/kernel/module.c linux-2.6.31.7/arch/alpha/kernel/module.c
+--- linux-2.6.31.7/arch/alpha/kernel/module.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/alpha/kernel/module.c 2009-12-08 17:39:42.713637173 -0500
+@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+- gp = (u64)me->module_core + me->core_size - 0x8000;
++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+diff -urNp linux-2.6.31.7/arch/alpha/kernel/osf_sys.c linux-2.6.31.7/arch/alpha/kernel/osf_sys.c
+--- linux-2.6.31.7/arch/alpha/kernel/osf_sys.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/alpha/kernel/osf_sys.c 2009-12-08 17:39:42.715135688 -0500
+@@ -1212,6 +1212,10 @@ arch_get_unmapped_area(struct file *filp
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ if (addr != (unsigned long) -ENOMEM)
+@@ -1219,8 +1223,8 @@ arch_get_unmapped_area(struct file *filp
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+diff -urNp linux-2.6.31.7/arch/alpha/mm/fault.c linux-2.6.31.7/arch/alpha/mm/fault.c
+--- linux-2.6.31.7/arch/alpha/mm/fault.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/alpha/mm/fault.c 2009-12-08 17:39:42.715135688 -0500
+@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long *)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++ do_group_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff -urNp linux-2.6.31.7/arch/arm/include/asm/atomic.h linux-2.6.31.7/arch/arm/include/asm/atomic.h
+--- linux-2.6.31.7/arch/arm/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/arm/include/asm/atomic.h 2009-12-08 17:39:42.722145546 -0500
+@@ -20,6 +20,7 @@
+ #ifdef __KERNEL__
+
+ #define atomic_read(v) ((v)->counter)
++#define atomic_read_unchecked(v) ((v)->counter)
+
+ #if __LINUX_ARM_ARCH__ >= 6
+
+@@ -44,6 +45,11 @@ static inline void atomic_set(atomic_t *
+ : "cc");
+ }
+
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ atomic_set((atomic_t *)v, i);
++}
++
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+ unsigned long tmp;
+@@ -60,6 +66,11 @@ static inline void atomic_add(int i, ato
+ : "cc");
+ }
+
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_add(i, (atomic_t *)v);
++}
++
+ static inline int atomic_add_return(int i, atomic_t *v)
+ {
+ unsigned long tmp;
+@@ -98,6 +109,11 @@ static inline void atomic_sub(int i, ato
+ : "cc");
+ }
+
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_sub(i, (atomic_t *)v);
++}
++
+ static inline int atomic_sub_return(int i, atomic_t *v)
+ {
+ unsigned long tmp;
+@@ -164,6 +180,7 @@ static inline void atomic_clear_mask(uns
+ #endif
+
+ #define atomic_set(v,i) (((v)->counter) = (i))
++#define atomic_set_unchecked(v,i) (((v)->counter) = (i))
+
+ static inline int atomic_add_return(int i, atomic_t *v)
+ {
+@@ -232,6 +249,7 @@ static inline int atomic_add_unless(atom
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++#define atomic_inc_unchecked(v) atomic_add_unchecked(1, v)
+ #define atomic_dec(v) atomic_sub(1, v)
+
+ #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+diff -urNp linux-2.6.31.7/arch/arm/include/asm/elf.h linux-2.6.31.7/arch/arm/include/asm/elf.h
+--- linux-2.6.31.7/arch/arm/include/asm/elf.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/arm/include/asm/elf.h 2009-12-08 17:39:42.722697255 -0500
+@@ -103,7 +103,14 @@ extern int arm_elf_read_implies_exec(con
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+diff -urNp linux-2.6.31.7/arch/arm/include/asm/kmap_types.h linux-2.6.31.7/arch/arm/include/asm/kmap_types.h
+--- linux-2.6.31.7/arch/arm/include/asm/kmap_types.h 2009-12-08 17:29:51.577600232 -0500
++++ linux-2.6.31.7/arch/arm/include/asm/kmap_types.h 2009-12-08 17:39:42.724479026 -0500
+@@ -19,6 +19,7 @@ enum km_type {
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+ KM_L2_CACHE,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.31.7/arch/arm/include/asm/uaccess.h linux-2.6.31.7/arch/arm/include/asm/uaccess.h
+--- linux-2.6.31.7/arch/arm/include/asm/uaccess.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/arm/include/asm/uaccess.h 2009-12-08 17:39:42.724813627 -0500
+@@ -400,6 +400,9 @@ extern unsigned long __must_check __strn
+
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else /* security hole - plug it */
+@@ -409,6 +412,9 @@ static inline unsigned long __must_check
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+diff -urNp linux-2.6.31.7/arch/arm/mach-ns9xxx/clock.c linux-2.6.31.7/arch/arm/mach-ns9xxx/clock.c
+--- linux-2.6.31.7/arch/arm/mach-ns9xxx/clock.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/arm/mach-ns9xxx/clock.c 2009-12-08 17:39:42.724813627 -0500
+@@ -195,7 +195,7 @@ static int clk_debugfs_open(struct inode
+ return single_open(file, clk_debugfs_show, NULL);
+ }
+
+-static struct file_operations clk_debugfs_operations = {
++static const struct file_operations clk_debugfs_operations = {
+ .open = clk_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.31.7/arch/arm/mm/mmap.c linux-2.6.31.7/arch/arm/mm/mmap.c
+--- linux-2.6.31.7/arch/arm/mm/mmap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/arm/mm/mmap.c 2009-12-08 17:39:42.724813627 -0500
+@@ -62,6 +62,10 @@ arch_get_unmapped_area(struct file *filp
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -74,10 +78,10 @@ arch_get_unmapped_area(struct file *filp
+ return addr;
+ }
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -93,8 +97,8 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+diff -urNp linux-2.6.31.7/arch/avr32/include/asm/atomic.h linux-2.6.31.7/arch/avr32/include/asm/atomic.h
+--- linux-2.6.31.7/arch/avr32/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/avr32/include/asm/atomic.h 2009-12-08 17:39:42.724813627 -0500
+@@ -20,7 +20,9 @@
+ #define ATOMIC_INIT(i) { (i) }
+
+ #define atomic_read(v) ((v)->counter)
++#define atomic_read_unchecked(v) ((v)->counter)
+ #define atomic_set(v, i) (((v)->counter) = i)
++#define atomic_set_unchecked(v, i) (((v)->counter) = i)
+
+ /*
+ * atomic_sub_return - subtract the atomic variable
+@@ -48,6 +50,18 @@ static inline int atomic_sub_return(int
+ }
+
+ /*
++ * atomic_sub_return_unchecked - subtract the atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v. Returns the resulting value.
++ */
++static inline int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ return atomic_sub_return(i, (atomic_t *)v);
++}
++
++/*
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -76,6 +90,18 @@ static inline int atomic_add_return(int
+ }
+
+ /*
++ * atomic_add_return_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v. Returns the resulting value.
++ */
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ return atomic_add_return(i, (atomic_t *)v);
++}
++
++/*
+ * atomic_sub_unless - sub unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+@@ -176,9 +202,12 @@ static inline int atomic_sub_if_positive
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+
+ #define atomic_sub(i, v) (void)atomic_sub_return(i, v)
++#define atomic_sub_unchecked(i, v) (void)atomic_sub_return_unchecked(i, v)
+ #define atomic_add(i, v) (void)atomic_add_return(i, v)
++#define atomic_add_unchecked(i, v) (void)atomic_add_return_unchecked(i, v)
+ #define atomic_dec(v) atomic_sub(1, (v))
+ #define atomic_inc(v) atomic_add(1, (v))
++#define atomic_inc_unchecked(v) (void)atomic_add_return_unchecked(1, (v))
+
+ #define atomic_dec_return(v) atomic_sub_return(1, v)
+ #define atomic_inc_return(v) atomic_add_return(1, v)
+diff -urNp linux-2.6.31.7/arch/avr32/include/asm/elf.h linux-2.6.31.7/arch/avr32/include/asm/elf.h
+--- linux-2.6.31.7/arch/avr32/include/asm/elf.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/avr32/include/asm/elf.h 2009-12-08 17:39:42.726144046 -0500
+@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff -urNp linux-2.6.31.7/arch/avr32/include/asm/kmap_types.h linux-2.6.31.7/arch/avr32/include/asm/kmap_types.h
+--- linux-2.6.31.7/arch/avr32/include/asm/kmap_types.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/avr32/include/asm/kmap_types.h 2009-12-08 17:39:42.726144046 -0500
+@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
+ D(11) KM_IRQ1,
+ D(12) KM_SOFTIRQ0,
+ D(13) KM_SOFTIRQ1,
+-D(14) KM_TYPE_NR
++D(14) KM_CLEARPAGE,
++D(15) KM_TYPE_NR
+ };
+
+ #undef D
+diff -urNp linux-2.6.31.7/arch/avr32/mm/fault.c linux-2.6.31.7/arch/avr32/mm/fault.c
+--- linux-2.6.31.7/arch/avr32/mm/fault.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/avr32/mm/fault.c 2009-12-08 17:39:42.726144046 -0500
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
+
+ int exception_trace = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+@@ -157,6 +174,16 @@ bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ if (exception_trace && printk_ratelimit())
+ printk("%s%s[%d]: segfault at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+diff -urNp linux-2.6.31.7/arch/blackfin/include/asm/atomic.h linux-2.6.31.7/arch/blackfin/include/asm/atomic.h
+--- linux-2.6.31.7/arch/blackfin/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/blackfin/include/asm/atomic.h 2009-12-08 17:39:42.727143822 -0500
+@@ -15,8 +15,10 @@
+
+ #define ATOMIC_INIT(i) { (i) }
+ #define atomic_set(v, i) (((v)->counter) = i)
++#define atomic_set_unchecked(v, i) (((v)->counter) = i)
+
+ #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
++#define atomic_read_unchecked(v) __raw_uncached_fetch_asm(&(v)->counter)
+
+ asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
+
+@@ -35,11 +37,21 @@ static inline void atomic_add(int i, ato
+ __raw_atomic_update_asm(&v->counter, i);
+ }
+
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_add(i, (atomic_t *)v);
++}
++
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+ __raw_atomic_update_asm(&v->counter, -i);
+ }
+
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_sub(i, (atomic_t *)v);
++}
++
+ static inline int atomic_add_return(int i, atomic_t *v)
+ {
+ return __raw_atomic_update_asm(&v->counter, i);
+@@ -55,6 +67,11 @@ static inline void atomic_inc(volatile a
+ __raw_atomic_update_asm(&v->counter, 1);
+ }
+
++static inline void atomic_inc_unchecked(volatile atomic_unchecked_t *v)
++{
++ atomic_inc((atomic_t *)v);
++}
++
+ static inline void atomic_dec(volatile atomic_t *v)
+ {
+ __raw_atomic_update_asm(&v->counter, -1);
+diff -urNp linux-2.6.31.7/arch/blackfin/mach-bf561/coreb.c linux-2.6.31.7/arch/blackfin/mach-bf561/coreb.c
+--- linux-2.6.31.7/arch/blackfin/mach-bf561/coreb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/blackfin/mach-bf561/coreb.c 2009-12-08 17:39:42.727143822 -0500
+@@ -48,7 +48,7 @@ coreb_ioctl(struct inode *inode, struct
+ return ret;
+ }
+
+-static struct file_operations coreb_fops = {
++static const struct file_operations coreb_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = coreb_ioctl,
+ };
+diff -urNp linux-2.6.31.7/arch/cris/arch-v10/drivers/sync_serial.c linux-2.6.31.7/arch/cris/arch-v10/drivers/sync_serial.c
+--- linux-2.6.31.7/arch/cris/arch-v10/drivers/sync_serial.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/cris/arch-v10/drivers/sync_serial.c 2009-12-08 17:39:42.727143822 -0500
+@@ -244,7 +244,7 @@ static unsigned sync_serial_prescale_sha
+
+ #define NUMBER_OF_PORTS 2
+
+-static struct file_operations sync_serial_fops = {
++static const struct file_operations sync_serial_fops = {
+ .owner = THIS_MODULE,
+ .write = sync_serial_write,
+ .read = sync_serial_read,
+diff -urNp linux-2.6.31.7/arch/cris/arch-v32/drivers/mach-fs/gpio.c linux-2.6.31.7/arch/cris/arch-v32/drivers/mach-fs/gpio.c
+--- linux-2.6.31.7/arch/cris/arch-v32/drivers/mach-fs/gpio.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/cris/arch-v32/drivers/mach-fs/gpio.c 2009-12-08 17:39:42.728141779 -0500
+@@ -855,7 +855,7 @@ gpio_leds_ioctl(unsigned int cmd, unsign
+ return 0;
+ }
+
+-struct file_operations gpio_fops = {
++const struct file_operations gpio_fops = {
+ .owner = THIS_MODULE,
+ .poll = gpio_poll,
+ .ioctl = gpio_ioctl,
+diff -urNp linux-2.6.31.7/arch/cris/include/asm/atomic.h linux-2.6.31.7/arch/cris/include/asm/atomic.h
+--- linux-2.6.31.7/arch/cris/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/cris/include/asm/atomic.h 2009-12-08 17:39:42.728663883 -0500
+@@ -16,7 +16,9 @@
+ #define ATOMIC_INIT(i) { (i) }
+
+ #define atomic_read(v) ((v)->counter)
++#define atomic_read_unchecked(v) ((v)->counter)
+ #define atomic_set(v,i) (((v)->counter) = (i))
++#define atomic_set_unchecked(v,i) (((v)->counter) = (i))
+
+ /* These should be written in asm but we do it in C for now. */
+
+@@ -28,6 +30,11 @@ static inline void atomic_add(int i, vol
+ cris_atomic_restore(v, flags);
+ }
+
++static inline void atomic_add_unchecked(int i, volatile atomic_unchecked_t *v)
++{
++ atomic_add(i, (volatile atomic_t *)v);
++}
++
+ static inline void atomic_sub(int i, volatile atomic_t *v)
+ {
+ unsigned long flags;
+@@ -36,6 +43,11 @@ static inline void atomic_sub(int i, vol
+ cris_atomic_restore(v, flags);
+ }
+
++static inline void atomic_sub_unchecked(int i, volatile atomic_unchecked_t *v)
++{
++ atomic_sub(i, (volatile atomic_t *)v);
++}
++
+ static inline int atomic_add_return(int i, volatile atomic_t *v)
+ {
+ unsigned long flags;
+@@ -76,6 +88,11 @@ static inline void atomic_inc(volatile a
+ cris_atomic_restore(v, flags);
+ }
+
++static inline void atomic_inc_unchecked(volatile atomic_unchecked_t *v)
++{
++ atomic_inc((volatile atomic_t *)v);
++}
++
+ static inline void atomic_dec(volatile atomic_t *v)
+ {
+ unsigned long flags;
+diff -urNp linux-2.6.31.7/arch/frv/include/asm/atomic.h linux-2.6.31.7/arch/frv/include/asm/atomic.h
+--- linux-2.6.31.7/arch/frv/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/frv/include/asm/atomic.h 2009-12-08 17:39:42.728663883 -0500
+@@ -37,7 +37,9 @@
+
+ #define ATOMIC_INIT(i) { (i) }
+ #define atomic_read(v) ((v)->counter)
++#define atomic_read_unchecked(v) ((v)->counter)
+ #define atomic_set(v, i) (((v)->counter) = (i))
++#define atomic_set_unchecked(v, i) (((v)->counter) = (i))
+
+ #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+ static inline int atomic_add_return(int i, atomic_t *v)
+@@ -99,16 +101,31 @@ static inline void atomic_add(int i, ato
+ atomic_add_return(i, v);
+ }
+
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_add_return(i, (atomic_t *)v);
++}
++
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+ atomic_sub_return(i, v);
+ }
+
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_sub_return(i, (atomic_t *)v);
++}
++
+ static inline void atomic_inc(atomic_t *v)
+ {
+ atomic_add_return(1, v);
+ }
+
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_return(1, (atomic_t *)v);
++}
++
+ static inline void atomic_dec(atomic_t *v)
+ {
+ atomic_sub_return(1, v);
+diff -urNp linux-2.6.31.7/arch/frv/include/asm/kmap_types.h linux-2.6.31.7/arch/frv/include/asm/kmap_types.h
+--- linux-2.6.31.7/arch/frv/include/asm/kmap_types.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/frv/include/asm/kmap_types.h 2009-12-08 17:39:42.728663883 -0500
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.31.7/arch/h8300/include/asm/atomic.h linux-2.6.31.7/arch/h8300/include/asm/atomic.h
+--- linux-2.6.31.7/arch/h8300/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/h8300/include/asm/atomic.h 2009-12-08 17:39:42.728663883 -0500
+@@ -11,7 +11,9 @@
+ #define ATOMIC_INIT(i) { (i) }
+
+ #define atomic_read(v) ((v)->counter)
++#define atomic_read_unchecked(v) ((v)->counter)
+ #define atomic_set(v, i) (((v)->counter) = i)
++#define atomic_set_unchecked(v, i) (((v)->counter) = i)
+
+ #include <asm/system.h>
+ #include <linux/kernel.h>
+@@ -25,7 +27,13 @@ static __inline__ int atomic_add_return(
+ return ret;
+ }
+
++static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ return atomic_add_return(i, (atomic_t *)v);
++}
++
+ #define atomic_add(i, v) atomic_add_return(i, v)
++#define atomic_add_unchecked(i, v) atomic_add_return_unchecked(i, v)
+ #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+ static __inline__ int atomic_sub_return(int i, atomic_t *v)
+@@ -37,7 +45,13 @@ static __inline__ int atomic_sub_return(
+ return ret;
+ }
+
++static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ return atomic_sub_return(i, (atomic_t *)v);
++}
++
+ #define atomic_sub(i, v) atomic_sub_return(i, v)
++#define atomic_sub_unchecked(i, v) atomic_sub_return_unchecked(i, v)
+ #define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0)
+
+ static __inline__ int atomic_inc_return(atomic_t *v)
+@@ -50,7 +64,13 @@ static __inline__ int atomic_inc_return(
+ return ret;
+ }
+
++static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_inc_return((atomic_t *)v);
++}
++
+ #define atomic_inc(v) atomic_inc_return(v)
++#define atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
+
+ /*
+ * atomic_inc_and_test - increment and test
+diff -urNp linux-2.6.31.7/arch/ia64/ia32/binfmt_elf32.c linux-2.6.31.7/arch/ia64/ia32/binfmt_elf32.c
+--- linux-2.6.31.7/arch/ia64/ia32/binfmt_elf32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/ia32/binfmt_elf32.c 2009-12-08 17:39:42.730143244 -0500
+@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
+
+ #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ /* Ugly but avoids duplication */
+ #include "../../../fs/binfmt_elf.c"
+
+@@ -69,11 +76,11 @@ ia32_install_gate_page (struct vm_area_s
+ }
+
+
+-static struct vm_operations_struct ia32_shared_page_vm_ops = {
++static const struct vm_operations_struct ia32_shared_page_vm_ops = {
+ .fault = ia32_install_shared_page
+ };
+
+-static struct vm_operations_struct ia32_gate_page_vm_ops = {
++static const struct vm_operations_struct ia32_gate_page_vm_ops = {
+ .fault = ia32_install_gate_page
+ };
+
+diff -urNp linux-2.6.31.7/arch/ia64/ia32/ia32priv.h linux-2.6.31.7/arch/ia64/ia32/ia32priv.h
+--- linux-2.6.31.7/arch/ia64/ia32/ia32priv.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/ia32/ia32priv.h 2009-12-08 17:39:42.730143244 -0500
+@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
+ #define ELF_DATA ELFDATA2LSB
+ #define ELF_ARCH EM_386
+
+-#define IA32_STACK_TOP IA32_PAGE_OFFSET
++#ifdef CONFIG_PAX_RANDUSTACK
++#define __IA32_DELTA_STACK (current->mm->delta_stack)
++#else
++#define __IA32_DELTA_STACK 0UL
++#endif
++
++#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
++
+ #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
+ #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
+
+diff -urNp linux-2.6.31.7/arch/ia64/include/asm/atomic.h linux-2.6.31.7/arch/ia64/include/asm/atomic.h
+--- linux-2.6.31.7/arch/ia64/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/include/asm/atomic.h 2009-12-08 17:39:42.730143244 -0500
+@@ -22,9 +22,11 @@
+ #define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+
+ #define atomic_read(v) ((v)->counter)
++#define atomic_read_unchecked(v) ((v)->counter)
+ #define atomic64_read(v) ((v)->counter)
+
+ #define atomic_set(v,i) (((v)->counter) = (i))
++#define atomic_set_unchecked(v,i) (((v)->counter) = (i))
+ #define atomic64_set(v,i) (((v)->counter) = (i))
+
+ static __inline__ int
+@@ -201,8 +203,11 @@ atomic64_add_negative (__s64 i, atomic64
+ #define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
+
+ #define atomic_add(i,v) atomic_add_return((i), (v))
++#define atomic_add_unchecked(i,v) atomic_add((i), (atomic_t *)(v))
+ #define atomic_sub(i,v) atomic_sub_return((i), (v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i), (atomic_t *)(v))
+ #define atomic_inc(v) atomic_add(1, (v))
++#define atomic_inc_unchecked(v) atomic_inc((atomic_t *)(v))
+ #define atomic_dec(v) atomic_sub(1, (v))
+
+ #define atomic64_add(i,v) atomic64_add_return((i), (v))
+diff -urNp linux-2.6.31.7/arch/ia64/include/asm/elf.h linux-2.6.31.7/arch/ia64/include/asm/elf.h
+--- linux-2.6.31.7/arch/ia64/include/asm/elf.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/include/asm/elf.h 2009-12-08 17:39:42.731144648 -0500
+@@ -43,6 +43,13 @@
+ */
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND 0x70000001
+
+ /* IA-64 relocations: */
+diff -urNp linux-2.6.31.7/arch/ia64/include/asm/pgtable.h linux-2.6.31.7/arch/ia64/include/asm/pgtable.h
+--- linux-2.6.31.7/arch/ia64/include/asm/pgtable.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/include/asm/pgtable.h 2009-12-08 17:39:42.731144648 -0500
+@@ -143,6 +143,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff -urNp linux-2.6.31.7/arch/ia64/include/asm/uaccess.h linux-2.6.31.7/arch/ia64/include/asm/uaccess.h
+--- linux-2.6.31.7/arch/ia64/include/asm/uaccess.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/include/asm/uaccess.h 2009-12-08 17:39:42.731144648 -0500
+@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
+ const void *__cu_from = (from); \
+ long __cu_len = (n); \
+ \
+- if (__access_ok(__cu_to, __cu_len, get_fs())) \
++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ __cu_len; \
+ })
+@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
+ long __cu_len = (n); \
+ \
+ __chk_user_ptr(__cu_from); \
+- if (__access_ok(__cu_from, __cu_len, get_fs())) \
++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
+ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+ __cu_len; \
+ })
+diff -urNp linux-2.6.31.7/arch/ia64/kernel/module.c linux-2.6.31.7/arch/ia64/kernel/module.c
+--- linux-2.6.31.7/arch/ia64/kernel/module.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/kernel/module.c 2009-12-08 17:39:42.732145221 -0500
+@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
+ void
+ module_free (struct module *mod, void *module_region)
+ {
+- if (mod && mod->arch.init_unw_table &&
+- module_region == mod->module_init) {
++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
+ unw_remove_unwind_table(mod->arch.init_unw_table);
+ mod->arch.init_unw_table = NULL;
+ }
+@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
+ }
+
+ static inline int
++in_init_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
++}
++
++static inline int
++in_init_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
++}
++
++static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_init < mod->init_size;
++ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
++}
++
++static inline int
++in_core_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
++}
++
++static inline int
++in_core_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
+ }
+
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_core < mod->core_size;
++ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
+ }
+
+ static inline int
+@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
+ break;
+
+ case RV_BDREL:
+- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
++ if (in_init_rx(mod, val))
++ val -= (uint64_t) mod->module_init_rx;
++ else if (in_init_rw(mod, val))
++ val -= (uint64_t) mod->module_init_rw;
++ else if (in_core_rx(mod, val))
++ val -= (uint64_t) mod->module_core_rx;
++ else if (in_core_rw(mod, val))
++ val -= (uint64_t) mod->module_core_rw;
+ break;
+
+ case RV_LTV:
+@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
+ * addresses have been selected...
+ */
+ uint64_t gp;
+- if (mod->core_size > MAX_LTOFF)
++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
+ /*
+ * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+ * at the end of the module.
+ */
+- gp = mod->core_size - MAX_LTOFF / 2;
++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
+ else
+- gp = mod->core_size / 2;
+- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
++ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
+ mod->arch.gp = gp;
+ DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+ }
+diff -urNp linux-2.6.31.7/arch/ia64/kernel/sys_ia64.c linux-2.6.31.7/arch/ia64/kernel/sys_ia64.c
+--- linux-2.6.31.7/arch/ia64/kernel/sys_ia64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/kernel/sys_ia64.c 2009-12-08 17:39:42.732145221 -0500
+@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
+ if (REGION_NUMBER(addr) == RGN_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = mm->free_area_cache;
+
+@@ -61,9 +68,9 @@ arch_get_unmapped_area (struct file *fil
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
++ if (start_addr != mm->mmap_base) {
+ /* Start a new search --- just in case we missed some holes. */
+- addr = TASK_UNMAPPED_BASE;
++ addr = mm->mmap_base;
+ goto full_search;
+ }
+ return -ENOMEM;
+diff -urNp linux-2.6.31.7/arch/ia64/kernel/topology.c linux-2.6.31.7/arch/ia64/kernel/topology.c
+--- linux-2.6.31.7/arch/ia64/kernel/topology.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/kernel/topology.c 2009-12-08 17:39:42.733141458 -0500
+@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
+ return ret;
+ }
+
+-static struct sysfs_ops cache_sysfs_ops = {
++static const struct sysfs_ops cache_sysfs_ops = {
+ .show = cache_show
+ };
+
+diff -urNp linux-2.6.31.7/arch/ia64/kernel/vmlinux.lds.S linux-2.6.31.7/arch/ia64/kernel/vmlinux.lds.S
+--- linux-2.6.31.7/arch/ia64/kernel/vmlinux.lds.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/kernel/vmlinux.lds.S 2009-12-08 17:39:42.733141458 -0500
+@@ -245,7 +245,7 @@ SECTIONS
+ /* Per-cpu data: */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ PERCPU_VADDR(PERCPU_ADDR, :percpu)
+- __phys_per_cpu_start = __per_cpu_load;
++ __phys_per_cpu_start = per_cpu_load;
+ . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
+ * into percpu page size
+ */
+diff -urNp linux-2.6.31.7/arch/ia64/mm/fault.c linux-2.6.31.7/arch/ia64/mm/fault.c
+--- linux-2.6.31.7/arch/ia64/mm/fault.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/mm/fault.c 2009-12-08 17:39:42.733141458 -0500
+@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void __kprobes
+ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
+ {
+@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
+ mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
+
++ }
++
+ survive:
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff -urNp linux-2.6.31.7/arch/ia64/mm/init.c linux-2.6.31.7/arch/ia64/mm/init.c
+--- linux-2.6.31.7/arch/ia64/mm/init.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/mm/init.c 2009-12-08 17:39:42.734143875 -0500
+@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
+ vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
+ vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++ vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+diff -urNp linux-2.6.31.7/arch/ia64/pci/pci.c linux-2.6.31.7/arch/ia64/pci/pci.c
+--- linux-2.6.31.7/arch/ia64/pci/pci.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/ia64/pci/pci.c 2009-12-08 17:39:42.734143875 -0500
+@@ -56,10 +56,13 @@ int raw_pci_read(unsigned int seg, unsig
+ if ((seg | reg) <= 255) {
+ addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
+ mode = 0;
+- } else {
++ } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
+ addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
+ mode = 1;
++ } else {
++ return -EINVAL;
+ }
++
+ result = ia64_sal_pci_config_read(addr, mode, len, &data);
+ if (result != 0)
+ return -EINVAL;
+@@ -80,10 +83,13 @@ int raw_pci_write(unsigned int seg, unsi
+ if ((seg | reg) <= 255) {
+ addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
+ mode = 0;
+- } else {
++ } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
+ addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
+ mode = 1;
++ } else {
++ return -EINVAL;
+ }
++
+ result = ia64_sal_pci_config_write(addr, mode, len, value);
+ if (result != 0)
+ return -EINVAL;
+diff -urNp linux-2.6.31.7/arch/m32r/include/asm/atomic.h linux-2.6.31.7/arch/m32r/include/asm/atomic.h
+--- linux-2.6.31.7/arch/m32r/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/m32r/include/asm/atomic.h 2009-12-08 17:39:42.735145259 -0500
+@@ -29,6 +29,14 @@
+ #define atomic_read(v) ((v)->counter)
+
+ /**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++#define atomic_read_unchecked(v) ((v)->counter)
++
++/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+@@ -38,6 +46,15 @@
+ #define atomic_set(v,i) (((v)->counter) = (i))
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++#define atomic_set_unchecked(v,i) (((v)->counter) = (i))
++
++/**
+ * atomic_add_return - add integer to atomic variable and return it
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -308,6 +325,10 @@ static __inline__ void atomic_set_mask(u
+ local_irq_restore(flags);
+ }
+
++#define atomic_inc_unchecked(v) atomic_inc((atomic_t *)(v))
++#define atomic_add_unchecked(i,v) atomic_add((i),(atomic_t *)(v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i),(atomic_t *)(v))
++
+ /* Atomic operations are already serializing on m32r */
+ #define smp_mb__before_atomic_dec() barrier()
+ #define smp_mb__after_atomic_dec() barrier()
+diff -urNp linux-2.6.31.7/arch/m32r/lib/usercopy.c linux-2.6.31.7/arch/m32r/lib/usercopy.c
+--- linux-2.6.31.7/arch/m32r/lib/usercopy.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/m32r/lib/usercopy.c 2009-12-08 17:39:42.735145259 -0500
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetchw(to);
+ if (access_ok(VERIFY_READ, from, n))
+ __copy_user_zeroing(to,from,n);
+diff -urNp linux-2.6.31.7/arch/m68k/include/asm/atomic_mm.h linux-2.6.31.7/arch/m68k/include/asm/atomic_mm.h
+--- linux-2.6.31.7/arch/m68k/include/asm/atomic_mm.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/m68k/include/asm/atomic_mm.h 2009-12-08 17:39:42.735145259 -0500
+@@ -16,23 +16,40 @@
+ #define ATOMIC_INIT(i) { (i) }
+
+ #define atomic_read(v) ((v)->counter)
++#define atomic_read_unchecked(v) ((v)->counter)
+ #define atomic_set(v, i) (((v)->counter) = i)
++#define atomic_set_unchecked(v, i) (((v)->counter) = i)
+
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+ __asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
+ }
+
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_add(i, (atomic_t *)v);
++}
++
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+ __asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
+ }
+
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_sub(i, (atomic_t *)v);
++}
++
+ static inline void atomic_inc(atomic_t *v)
+ {
+ __asm__ __volatile__("addql #1,%0" : "+m" (*v));
+ }
+
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_inc((atomic_t *)v);
++}
++
+ static inline void atomic_dec(atomic_t *v)
+ {
+ __asm__ __volatile__("subql #1,%0" : "+m" (*v));
+diff -urNp linux-2.6.31.7/arch/m68k/include/asm/atomic_no.h linux-2.6.31.7/arch/m68k/include/asm/atomic_no.h
+--- linux-2.6.31.7/arch/m68k/include/asm/atomic_no.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/m68k/include/asm/atomic_no.h 2009-12-08 17:39:42.736144638 -0500
+@@ -16,7 +16,9 @@
+ #define ATOMIC_INIT(i) { (i) }
+
+ #define atomic_read(v) ((v)->counter)
++#define atomic_read_unchecked(v) ((v)->counter)
+ #define atomic_set(v, i) (((v)->counter) = i)
++#define atomic_set_unchecked(v, i) (((v)->counter) = i)
+
+ static __inline__ void atomic_add(int i, atomic_t *v)
+ {
+@@ -27,6 +29,11 @@ static __inline__ void atomic_add(int i,
+ #endif
+ }
+
++static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_add(i, (atomic_t *)v);
++}
++
+ static __inline__ void atomic_sub(int i, atomic_t *v)
+ {
+ #ifdef CONFIG_COLDFIRE
+@@ -36,6 +43,11 @@ static __inline__ void atomic_sub(int i,
+ #endif
+ }
+
++static __inline__ void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_sub(i, (atomic_t *)v);
++}
++
+ static __inline__ int atomic_sub_and_test(int i, atomic_t * v)
+ {
+ char c;
+@@ -56,6 +68,11 @@ static __inline__ void atomic_inc(volati
+ __asm__ __volatile__("addql #1,%0" : "+m" (*v));
+ }
+
++static __inline__ void atomic_inc_unchecked(volatile atomic_unchecked_t *v)
++{
++ atomic_inc((volatile atomic_t *)v);
++}
++
+ /*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+diff -urNp linux-2.6.31.7/arch/mips/include/asm/atomic.h linux-2.6.31.7/arch/mips/include/asm/atomic.h
+--- linux-2.6.31.7/arch/mips/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/mips/include/asm/atomic.h 2009-12-08 17:39:42.736144638 -0500
+@@ -32,6 +32,14 @@
+ #define atomic_read(v) ((v)->counter)
+
+ /*
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++#define atomic_read_unchecked(v) ((v)->counter)
++
++/*
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+@@ -41,6 +49,15 @@
+ #define atomic_set(v, i) ((v)->counter = (i))
+
+ /*
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++#define atomic_set_unchecked(v, i) ((v)->counter = (i))
++
++/*
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -381,6 +398,9 @@ static __inline__ int atomic_add_unless(
+ * Atomically increments @v by 1.
+ */
+ #define atomic_inc(v) atomic_add(1, (v))
++#define atomic_inc_unchecked(v) atomic_inc((atomic_t *)(v))
++#define atomic_add_unchecked(i, v) atomic_add((i), (atomic_t *)(v))
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (atomic_t *)(v))
+
+ /*
+ * atomic_dec - decrement and test
+diff -urNp linux-2.6.31.7/arch/mips/include/asm/elf.h linux-2.6.31.7/arch/mips/include/asm/elf.h
+--- linux-2.6.31.7/arch/mips/include/asm/elf.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/mips/include/asm/elf.h 2009-12-08 17:39:42.736876781 -0500
+@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #endif /* _ASM_ELF_H */
+diff -urNp linux-2.6.31.7/arch/mips/include/asm/page.h linux-2.6.31.7/arch/mips/include/asm/page.h
+--- linux-2.6.31.7/arch/mips/include/asm/page.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/mips/include/asm/page.h 2009-12-08 17:39:42.736876781 -0500
+@@ -92,7 +92,7 @@ extern void copy_user_highpage(struct pa
+ #ifdef CONFIG_CPU_MIPS32
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+ #else
+ typedef struct { unsigned long long pte; } pte_t;
+ #define pte_val(x) ((x).pte)
+diff -urNp linux-2.6.31.7/arch/mips/include/asm/system.h linux-2.6.31.7/arch/mips/include/asm/system.h
+--- linux-2.6.31.7/arch/mips/include/asm/system.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/mips/include/asm/system.h 2009-12-08 17:39:42.737850095 -0500
+@@ -217,6 +217,6 @@ extern void per_cpu_trap_init(void);
+ */
+ #define __ARCH_WANT_UNLOCKED_CTXSW
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ALMASK)
+
+ #endif /* _ASM_SYSTEM_H */
+diff -urNp linux-2.6.31.7/arch/mips/kernel/binfmt_elfn32.c linux-2.6.31.7/arch/mips/kernel/binfmt_elfn32.c
+--- linux-2.6.31.7/arch/mips/kernel/binfmt_elfn32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/mips/kernel/binfmt_elfn32.c 2009-12-08 17:39:42.737850095 -0500
+@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/elfcore.h>
+diff -urNp linux-2.6.31.7/arch/mips/kernel/binfmt_elfo32.c linux-2.6.31.7/arch/mips/kernel/binfmt_elfo32.c
+--- linux-2.6.31.7/arch/mips/kernel/binfmt_elfo32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/mips/kernel/binfmt_elfo32.c 2009-12-08 17:39:42.737850095 -0500
+@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+
+ /*
+diff -urNp linux-2.6.31.7/arch/mips/kernel/process.c linux-2.6.31.7/arch/mips/kernel/process.c
+--- linux-2.6.31.7/arch/mips/kernel/process.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/mips/kernel/process.c 2009-12-08 17:39:42.737850095 -0500
+@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
+ out:
+ return pc;
+ }
+-
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+-
+- return sp & ALMASK;
+-}
+diff -urNp linux-2.6.31.7/arch/mips/kernel/syscall.c linux-2.6.31.7/arch/mips/kernel/syscall.c
+--- linux-2.6.31.7/arch/mips/kernel/syscall.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/mips/kernel/syscall.c 2009-12-08 17:39:42.739964825 -0500
+@@ -99,6 +99,11 @@ unsigned long arch_get_unmapped_area(str
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -109,7 +114,7 @@ unsigned long arch_get_unmapped_area(str
+ (!vmm || addr + len <= vmm->vm_start))
+ return addr;
+ }
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+diff -urNp linux-2.6.31.7/arch/mips/mm/fault.c linux-2.6.31.7/arch/mips/mm/fault.c
+--- linux-2.6.31.7/arch/mips/mm/fault.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/mips/mm/fault.c 2009-12-08 17:39:42.739964825 -0500
+@@ -26,6 +26,23 @@
+ #include <asm/ptrace.h>
+ #include <asm/highmem.h> /* For VMALLOC_END */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+diff -urNp linux-2.6.31.7/arch/mn10300/include/asm/atomic.h linux-2.6.31.7/arch/mn10300/include/asm/atomic.h
+--- linux-2.6.31.7/arch/mn10300/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/mn10300/include/asm/atomic.h 2009-12-08 17:39:42.740861181 -0500
+@@ -34,6 +34,15 @@
+ #define atomic_read(v) ((v)->counter)
+
+ /**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v. Note that the guaranteed
++ * useful range of an atomic_unchecked_t is only 24 bits.
++ */
++#define atomic_read_unchecked(v) ((v)->counter)
++
++/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+@@ -43,6 +52,16 @@
+ */
+ #define atomic_set(v, i) (((v)->counter) = (i))
+
++/**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i. Note that the guaranteed
++ * useful range of an atomic_unchecked_t is only 24 bits.
++ */
++#define atomic_set_unchecked(v, i) (((v)->counter) = (i))
++
+ #include <asm/system.h>
+
+ /**
+@@ -99,16 +118,31 @@ static inline void atomic_add(int i, ato
+ atomic_add_return(i, v);
+ }
+
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_add_return(i, (atomic_t *)v);
++}
++
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+ atomic_sub_return(i, v);
+ }
+
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_sub_return(i, (atomic_t *)v);
++}
++
+ static inline void atomic_inc(atomic_t *v)
+ {
+ atomic_add_return(1, v);
+ }
+
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_return(1, (atomic_t *)v);
++}
++
+ static inline void atomic_dec(atomic_t *v)
+ {
+ atomic_sub_return(1, v);
+diff -urNp linux-2.6.31.7/arch/mn10300/kernel/setup.c linux-2.6.31.7/arch/mn10300/kernel/setup.c
+--- linux-2.6.31.7/arch/mn10300/kernel/setup.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/mn10300/kernel/setup.c 2009-12-08 17:39:42.740861181 -0500
+@@ -285,7 +285,7 @@ static void c_stop(struct seq_file *m, v
+ {
+ }
+
+-struct seq_operations cpuinfo_op = {
++const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+diff -urNp linux-2.6.31.7/arch/parisc/include/asm/atomic.h linux-2.6.31.7/arch/parisc/include/asm/atomic.h
+--- linux-2.6.31.7/arch/parisc/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/parisc/include/asm/atomic.h 2009-12-08 17:39:42.741680322 -0500
+@@ -177,6 +177,18 @@ static __inline__ int __atomic_add_retur
+ return ret;
+ }
+
++static __inline__ int __atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ int ret;
++ unsigned long flags;
++ _atomic_spin_lock_irqsave(v, flags);
++
++ ret = (v->counter += i);
++
++ _atomic_spin_unlock_irqrestore(v, flags);
++ return ret;
++}
++
+ static __inline__ void atomic_set(atomic_t *v, int i)
+ {
+ unsigned long flags;
+@@ -187,11 +199,26 @@ static __inline__ void atomic_set(atomic
+ _atomic_spin_unlock_irqrestore(v, flags);
+ }
+
++static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ unsigned long flags;
++ _atomic_spin_lock_irqsave(v, flags);
++
++ v->counter = i;
++
++ _atomic_spin_unlock_irqrestore(v, flags);
++}
++
+ static __inline__ int atomic_read(const atomic_t *v)
+ {
+ return v->counter;
+ }
+
++static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
++
+ /* exported interface */
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+@@ -223,8 +250,11 @@ static __inline__ int atomic_add_unless(
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+ #define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v))))
++#define atomic_add_unchecked(i,v)	((void)(__atomic_add_return_unchecked( (i),(v))))
+ #define atomic_sub(i,v) ((void)(__atomic_add_return(-(i),(v))))
++#define atomic_sub_unchecked(i,v) ((void)(__atomic_add_return_unchecked(-(i),(v))))
+ #define atomic_inc(v) ((void)(__atomic_add_return( 1,(v))))
++#define atomic_inc_unchecked(v) ((void)(__atomic_add_return_unchecked( 1,(v))))
+ #define atomic_dec(v) ((void)(__atomic_add_return( -1,(v))))
+
+ #define atomic_add_return(i,v) (__atomic_add_return( (i),(v)))
+diff -urNp linux-2.6.31.7/arch/parisc/include/asm/elf.h linux-2.6.31.7/arch/parisc/include/asm/elf.h
+--- linux-2.6.31.7/arch/parisc/include/asm/elf.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/parisc/include/asm/elf.h 2009-12-08 17:39:42.741680322 -0500
+@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff -urNp linux-2.6.31.7/arch/parisc/include/asm/pgtable.h linux-2.6.31.7/arch/parisc/include/asm/pgtable.h
+--- linux-2.6.31.7/arch/parisc/include/asm/pgtable.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/parisc/include/asm/pgtable.h 2009-12-08 17:39:42.742705008 -0500
+@@ -207,6 +207,17 @@
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
+ #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+diff -urNp linux-2.6.31.7/arch/parisc/kernel/module.c linux-2.6.31.7/arch/parisc/kernel/module.c
+--- linux-2.6.31.7/arch/parisc/kernel/module.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/parisc/kernel/module.c 2009-12-08 17:39:42.742705008 -0500
+@@ -95,16 +95,38 @@
+
+ /* three functions to determine where in the module core
+ * or init pieces the location is */
++static inline int in_init_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rx &&
++ loc < (me->module_init_rx + me->init_size_rx));
++}
++
++static inline int in_init_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rw &&
++ loc < (me->module_init_rw + me->init_size_rw));
++}
++
+ static inline int in_init(struct module *me, void *loc)
+ {
+- return (loc >= me->module_init &&
+- loc <= (me->module_init + me->init_size));
++ return in_init_rx(me, loc) || in_init_rw(me, loc);
++}
++
++static inline int in_core_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rx &&
++ loc < (me->module_core_rx + me->core_size_rx));
++}
++
++static inline int in_core_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rw &&
++ loc < (me->module_core_rw + me->core_size_rw));
+ }
+
+ static inline int in_core(struct module *me, void *loc)
+ {
+- return (loc >= me->module_core &&
+- loc <= (me->module_core + me->core_size));
++ return in_core_rx(me, loc) || in_core_rw(me, loc);
+ }
+
+ static inline int in_local(struct module *me, void *loc)
+@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
+ }
+
+ /* align things a bit */
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.got_offset = me->core_size;
+- me->core_size += gots * sizeof(struct got_entry);
+-
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.fdesc_offset = me->core_size;
+- me->core_size += fdescs * sizeof(Elf_Fdesc);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += gots * sizeof(struct got_entry);
++
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.fdesc_offset = me->core_size_rw;
++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
+
+ BUG_ON(value == 0);
+
+- got = me->module_core + me->arch.got_offset;
++ got = me->module_core_rw + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
+
+ /* Create new one */
+ fdesc->addr = value;
+- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
+diff -urNp linux-2.6.31.7/arch/parisc/kernel/sys_parisc.c linux-2.6.31.7/arch/parisc/kernel/sys_parisc.c
+--- linux-2.6.31.7/arch/parisc/kernel/sys_parisc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/parisc/kernel/sys_parisc.c 2009-12-08 17:39:42.744153110 -0500
+@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
+ if (flags & MAP_FIXED)
+ return addr;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (filp) {
+ addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
+diff -urNp linux-2.6.31.7/arch/parisc/kernel/traps.c linux-2.6.31.7/arch/parisc/kernel/traps.c
+--- linux-2.6.31.7/arch/parisc/kernel/traps.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/parisc/kernel/traps.c 2009-12-08 17:39:42.744839768 -0500
+@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff -urNp linux-2.6.31.7/arch/parisc/mm/fault.c linux-2.6.31.7/arch/parisc/mm/fault.c
+--- linux-2.6.31.7/arch/parisc/mm/fault.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/parisc/mm/fault.c 2009-12-08 17:39:42.744839768 -0500
+@@ -15,6 +15,7 @@
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int *)addr);
++ err |= get_user(bv, (unsigned int *)(addr+4));
++ err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fix;
+@@ -192,8 +303,33 @@ good_area:
+
+ acc_type = parisc_acctyp(code,regs->iir);
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff -urNp linux-2.6.31.7/arch/powerpc/include/asm/atomic.h linux-2.6.31.7/arch/powerpc/include/asm/atomic.h
+--- linux-2.6.31.7/arch/powerpc/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/include/asm/atomic.h 2009-12-08 17:39:42.745803388 -0500
+@@ -24,11 +24,21 @@ static __inline__ int atomic_read(const
+ return t;
+ }
+
++static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return atomic_read((const atomic_t *)v);
++}
++
+ static __inline__ void atomic_set(atomic_t *v, int i)
+ {
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ }
+
++static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ atomic_set((atomic_t *)v, i);
++}
++
+ static __inline__ void atomic_add(int a, atomic_t *v)
+ {
+ int t;
+@@ -44,6 +54,11 @@ static __inline__ void atomic_add(int a,
+ : "cc");
+ }
+
++static __inline__ void atomic_add_unchecked(int a, atomic_unchecked_t *v)
++{
++ atomic_add(a, (atomic_t *)v);
++}
++
+ static __inline__ int atomic_add_return(int a, atomic_t *v)
+ {
+ int t;
+@@ -80,6 +95,11 @@ static __inline__ void atomic_sub(int a,
+ : "cc");
+ }
+
++static __inline__ void atomic_sub_unchecked(int a, atomic_unchecked_t *v)
++{
++ atomic_sub(a, (atomic_t *)v);
++}
++
+ static __inline__ int atomic_sub_return(int a, atomic_t *v)
+ {
+ int t;
+@@ -114,6 +134,11 @@ static __inline__ void atomic_inc(atomic
+ : "cc", "xer");
+ }
+
++static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_inc((atomic_t *)v);
++}
++
+ static __inline__ int atomic_inc_return(atomic_t *v)
+ {
+ int t;
+diff -urNp linux-2.6.31.7/arch/powerpc/include/asm/elf.h linux-2.6.31.7/arch/powerpc/include/asm/elf.h
+--- linux-2.6.31.7/arch/powerpc/include/asm/elf.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/include/asm/elf.h 2009-12-08 17:39:42.745803388 -0500
+@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
++#define ELF_ET_DYN_BASE (0x20000000)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
++#endif
+
+ /*
+ * Our registers are always unsigned longs, whether we're a 32 bit
+@@ -279,9 +290,6 @@ extern int arch_setup_additional_pages(s
+ (0x7ff >> (PAGE_SHIFT - 12)) : \
+ (0x3ffff >> (PAGE_SHIFT - 12)))
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* __KERNEL__ */
+
+ /*
+diff -urNp linux-2.6.31.7/arch/powerpc/include/asm/kmap_types.h linux-2.6.31.7/arch/powerpc/include/asm/kmap_types.h
+--- linux-2.6.31.7/arch/powerpc/include/asm/kmap_types.h 2009-12-08 17:29:51.577600232 -0500
++++ linux-2.6.31.7/arch/powerpc/include/asm/kmap_types.h 2009-12-08 17:39:42.745803388 -0500
+@@ -26,6 +26,7 @@ enum km_type {
+ KM_SOFTIRQ1,
+ KM_PPC_SYNC_PAGE,
+ KM_PPC_SYNC_ICACHE,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.31.7/arch/powerpc/include/asm/page_64.h linux-2.6.31.7/arch/powerpc/include/asm/page_64.h
+--- linux-2.6.31.7/arch/powerpc/include/asm/page_64.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/include/asm/page_64.h 2009-12-08 17:39:42.746866494 -0500
+@@ -170,15 +170,18 @@ do { \
+ * stack by default, so in the absense of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_32BIT) ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+
+ #include <asm-generic/getorder.h>
+
+diff -urNp linux-2.6.31.7/arch/powerpc/include/asm/page.h linux-2.6.31.7/arch/powerpc/include/asm/page.h
+--- linux-2.6.31.7/arch/powerpc/include/asm/page.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/include/asm/page.h 2009-12-08 17:39:42.746866494 -0500
+@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+diff -urNp linux-2.6.31.7/arch/powerpc/include/asm/pte-common.h linux-2.6.31.7/arch/powerpc/include/asm/pte-common.h
+--- linux-2.6.31.7/arch/powerpc/include/asm/pte-common.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/include/asm/pte-common.h 2009-12-08 17:39:42.747669133 -0500
+@@ -121,11 +121,11 @@ extern unsigned long bad_call_to_PMD_PAG
+ */
+ #define PAGE_NONE __pgprot(_PAGE_BASE)
+ #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
++#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC | _PAGE_HWEXEC)
+ #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
++#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC)
+ #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
++#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC)
+
+ #define __P000 PAGE_NONE
+ #define __P001 PAGE_READONLY
+diff -urNp linux-2.6.31.7/arch/powerpc/include/asm/pte-hash32.h linux-2.6.31.7/arch/powerpc/include/asm/pte-hash32.h
+--- linux-2.6.31.7/arch/powerpc/include/asm/pte-hash32.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/include/asm/pte-hash32.h 2009-12-08 17:39:42.747669133 -0500
+@@ -21,6 +21,7 @@
+ #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
+ #define _PAGE_USER 0x004 /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
++#define _PAGE_HWEXEC _PAGE_GUARDED
+ #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+diff -urNp linux-2.6.31.7/arch/powerpc/include/asm/reg.h linux-2.6.31.7/arch/powerpc/include/asm/reg.h
+--- linux-2.6.31.7/arch/powerpc/include/asm/reg.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/include/asm/reg.h 2009-12-08 17:39:42.748708825 -0500
+@@ -195,6 +195,7 @@
+ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
+ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+ #define DSISR_NOHPTE 0x40000000 /* no translation found */
++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
+ #define DSISR_PROTFAULT 0x08000000 /* protection fault */
+ #define DSISR_ISSTORE 0x02000000 /* access was a store */
+ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+diff -urNp linux-2.6.31.7/arch/powerpc/include/asm/uaccess.h linux-2.6.31.7/arch/powerpc/include/asm/uaccess.h
+--- linux-2.6.31.7/arch/powerpc/include/asm/uaccess.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/include/asm/uaccess.h 2009-12-08 17:39:42.748708825 -0500
+@@ -327,52 +327,6 @@ do { \
+ extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+-#ifndef __powerpc64__
+-
+-static inline unsigned long copy_from_user(void *to,
+- const void __user *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_READ, from, n))
+- return __copy_tofrom_user((__force void __user *)to, from, n);
+- if ((unsigned long)from < TASK_SIZE) {
+- over = (unsigned long)from + n - TASK_SIZE;
+- return __copy_tofrom_user((__force void __user *)to, from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-static inline unsigned long copy_to_user(void __user *to,
+- const void *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_WRITE, to, n))
+- return __copy_tofrom_user(to, (__force void __user *)from, n);
+- if ((unsigned long)to < TASK_SIZE) {
+- over = (unsigned long)to + n - TASK_SIZE;
+- return __copy_tofrom_user(to, (__force void __user *)from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-#else /* __powerpc64__ */
+-
+-#define __copy_in_user(to, from, size) \
+- __copy_tofrom_user((to), (from), (size))
+-
+-extern unsigned long copy_from_user(void *to, const void __user *from,
+- unsigned long n);
+-extern unsigned long copy_to_user(void __user *to, const void *from,
+- unsigned long n);
+-extern unsigned long copy_in_user(void __user *to, const void __user *from,
+- unsigned long n);
+-
+-#endif /* __powerpc64__ */
+-
+ static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+ {
+@@ -396,6 +350,9 @@ static inline unsigned long __copy_from_
+ if (ret == 0)
+ return 0;
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
+
+@@ -422,6 +379,9 @@ static inline unsigned long __copy_to_us
+ if (ret == 0)
+ return 0;
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+ }
+
+@@ -439,6 +399,97 @@ static inline unsigned long __copy_to_us
+ return __copy_to_user_inatomic(to, from, size);
+ }
+
++#ifndef __powerpc64__
++
++static inline unsigned long __must_check copy_from_user(void *to,
++ const void __user *from, unsigned long n)
++{
++ unsigned long over;
++
++ if (((long)n < 0) || (n > INT_MAX))
++ return n;
++
++ if (access_ok(VERIFY_READ, from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
++ return __copy_tofrom_user((__force void __user *)to, from, n);
++ }
++ if ((unsigned long)from < TASK_SIZE) {
++ over = (unsigned long)from + n - TASK_SIZE;
++ if (!__builtin_constant_p(n - over))
++ check_object_size(to, n - over, false);
++ return __copy_tofrom_user((__force void __user *)to, from,
++ n - over) + over;
++ }
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++ const void *from, unsigned long n)
++{
++ unsigned long over;
++
++ if (((long)n < 0) || (n > INT_MAX))
++ return n;
++
++ if (access_ok(VERIFY_WRITE, to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ return __copy_tofrom_user(to, (__force void __user *)from, n);
++ }
++ if ((unsigned long)to < TASK_SIZE) {
++ over = (unsigned long)to + n - TASK_SIZE;
++ if (!__builtin_constant_p(n - over))
++ check_object_size(from, n - over, true);
++ return __copy_tofrom_user(to, (__force void __user *)from,
++ n - over) + over;
++ }
++ return n;
++}
++
++#else /* __powerpc64__ */
++
++#define __copy_in_user(to, from, size) \
++ __copy_tofrom_user((to), (from), (size))
++
++static inline unsigned long __must_check copy_from_user(void *to,
++ const void __user *from, unsigned long n)
++{
++ if (unlikely(((long)n < 0) || (n > INT_MAX)))
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
++ if (likely(access_ok(VERIFY_READ, from, n)))
++ n = __copy_from_user(to, from, n);
++ else
++ memset(to, 0, n);
++
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++ const void *from, unsigned long n)
++{
++ if (unlikely(((long)n < 0) || (n > INT_MAX)))
++ return n;
++
++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ n = __copy_to_user(to, from, n);
++ }
++
++ return n;
++}
++
++extern unsigned long copy_in_user(void __user *to, const void __user *from,
++ unsigned long n);
++
++#endif /* __powerpc64__ */
++
+ extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
+diff -urNp linux-2.6.31.7/arch/powerpc/kernel/cacheinfo.c linux-2.6.31.7/arch/powerpc/kernel/cacheinfo.c
+--- linux-2.6.31.7/arch/powerpc/kernel/cacheinfo.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/kernel/cacheinfo.c 2009-12-08 17:39:42.748708825 -0500
+@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
+ &cache_assoc_attr,
+ };
+
+-static struct sysfs_ops cache_index_ops = {
++static const struct sysfs_ops cache_index_ops = {
+ .show = cache_index_show,
+ };
+
+diff -urNp linux-2.6.31.7/arch/powerpc/kernel/module_32.c linux-2.6.31.7/arch/powerpc/kernel/module_32.c
+--- linux-2.6.31.7/arch/powerpc/kernel/module_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/kernel/module_32.c 2009-12-08 17:39:42.750146894 -0500
+@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
+ me->arch.core_plt_section = i;
+ }
+ if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+- printk("Module doesn't contain .plt or .init.plt sections.\n");
++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+ return -ENOEXEC;
+ }
+
+@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
+
+ DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ /* Init, or core PLT? */
+- if (location >= mod->module_core
+- && location < mod->module_core + mod->core_size)
++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
+ entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+- else
++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
+ entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++ else {
++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++ return ~0UL;
++ }
+
+ /* Find this entry, or if that fails, the next avail. entry */
+ while (entry->jump[0]) {
+diff -urNp linux-2.6.31.7/arch/powerpc/kernel/process.c linux-2.6.31.7/arch/powerpc/kernel/process.c
+--- linux-2.6.31.7/arch/powerpc/kernel/process.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/kernel/process.c 2009-12-08 17:39:42.750146894 -0500
+@@ -1147,36 +1147,3 @@ unsigned long arch_align_stack(unsigned
+ sp -= get_random_int() & ~PAGE_MASK;
+ return sp & ~0xf;
+ }
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = 0;
+-
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+- else
+- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+-
+- return rnd << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < base)
+- return base;
+-
+- return ret;
+-}
+diff -urNp linux-2.6.31.7/arch/powerpc/kernel/setup-common.c linux-2.6.31.7/arch/powerpc/kernel/setup-common.c
+--- linux-2.6.31.7/arch/powerpc/kernel/setup-common.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/kernel/setup-common.c 2009-12-08 17:39:42.750146894 -0500
+@@ -328,7 +328,7 @@ static void c_stop(struct seq_file *m, v
+ {
+ }
+
+-struct seq_operations cpuinfo_op = {
++const struct seq_operations cpuinfo_op = {
+ .start =c_start,
+ .next = c_next,
+ .stop = c_stop,
+diff -urNp linux-2.6.31.7/arch/powerpc/kernel/signal_32.c linux-2.6.31.7/arch/powerpc/kernel/signal_32.c
+--- linux-2.6.31.7/arch/powerpc/kernel/signal_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/kernel/signal_32.c 2009-12-08 17:39:42.751145536 -0500
+@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
+ /* Save user registers on the stack */
+ frame = &rt_sf->uc.uc_mcontext;
+ addr = frame;
+- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ if (save_user_regs(regs, frame, 0, 1))
+ goto badframe;
+ regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+diff -urNp linux-2.6.31.7/arch/powerpc/kernel/signal_64.c linux-2.6.31.7/arch/powerpc/kernel/signal_64.c
+--- linux-2.6.31.7/arch/powerpc/kernel/signal_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/kernel/signal_64.c 2009-12-08 17:39:42.751145536 -0500
+@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
+ current->thread.fpscr.val = 0;
+
+ /* Set up to return from userspace. */
+- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+ } else {
+ err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff -urNp linux-2.6.31.7/arch/powerpc/kernel/sys_ppc32.c linux-2.6.31.7/arch/powerpc/kernel/sys_ppc32.c
+--- linux-2.6.31.7/arch/powerpc/kernel/sys_ppc32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/kernel/sys_ppc32.c 2009-12-08 17:39:42.752147768 -0500
+@@ -552,10 +552,10 @@ asmlinkage long compat_sys_sysctl(struct
+ if (oldlenp) {
+ if (!error) {
+ if (get_user(oldlen, oldlenp) ||
+- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
++ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
++ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
+ error = -EFAULT;
+ }
+- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
+ }
+ return error;
+ }
+diff -urNp linux-2.6.31.7/arch/powerpc/kernel/vdso.c linux-2.6.31.7/arch/powerpc/kernel/vdso.c
+--- linux-2.6.31.7/arch/powerpc/kernel/vdso.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/kernel/vdso.c 2009-12-08 17:39:42.752147768 -0500
+@@ -35,6 +35,7 @@
+ #include <asm/firmware.h>
+ #include <asm/vdso.h>
+ #include <asm/vdso_datapage.h>
++#include <asm/mman.h>
+
+ #include "setup.h"
+
+@@ -211,7 +212,7 @@ int arch_setup_additional_pages(struct l
+ vdso_base = VDSO32_MBASE;
+ #endif
+
+- current->mm->context.vdso_base = 0;
++ current->mm->context.vdso_base = ~0UL;
+
+ /* vDSO has a problem and was disabled, just don't "enable" it for the
+ * process
+@@ -228,7 +229,7 @@ int arch_setup_additional_pages(struct l
+ */
+ down_write(&mm->mmap_sem);
+ vdso_base = get_unmapped_area(NULL, vdso_base,
+- vdso_pages << PAGE_SHIFT, 0, 0);
++ vdso_pages << PAGE_SHIFT, 0, MAP_PRIVATE | MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
+diff -urNp linux-2.6.31.7/arch/powerpc/kvm/timing.c linux-2.6.31.7/arch/powerpc/kvm/timing.c
+--- linux-2.6.31.7/arch/powerpc/kvm/timing.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/kvm/timing.c 2009-12-08 17:39:42.753091855 -0500
+@@ -201,7 +201,7 @@ static int kvmppc_exit_timing_open(struc
+ return single_open(file, kvmppc_exit_timing_show, inode->i_private);
+ }
+
+-static struct file_operations kvmppc_exit_timing_fops = {
++static const struct file_operations kvmppc_exit_timing_fops = {
+ .owner = THIS_MODULE,
+ .open = kvmppc_exit_timing_open,
+ .read = seq_read,
+diff -urNp linux-2.6.31.7/arch/powerpc/lib/usercopy_64.c linux-2.6.31.7/arch/powerpc/lib/usercopy_64.c
+--- linux-2.6.31.7/arch/powerpc/lib/usercopy_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/lib/usercopy_64.c 2009-12-08 17:39:42.753091855 -0500
+@@ -9,22 +9,6 @@
+ #include <linux/module.h>
+ #include <asm/uaccess.h>
+
+-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_READ, from, n)))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
+-}
+-
+-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_WRITE, to, n)))
+- n = __copy_to_user(to, from, n);
+- return n;
+-}
+-
+ unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n)
+ {
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
+ return n;
+ }
+
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(copy_in_user);
+
+diff -urNp linux-2.6.31.7/arch/powerpc/mm/fault.c linux-2.6.31.7/arch/powerpc/mm/fault.c
+--- linux-2.6.31.7/arch/powerpc/mm/fault.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/mm/fault.c 2009-12-08 17:39:42.753721239 -0500
+@@ -30,6 +30,10 @@
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
+ #include <linux/perf_counter.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -40,6 +44,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+ #include <asm/siginfo.h>
++#include <asm/ptrace.h>
+
+
+ #ifdef CONFIG_KPROBES
+@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (trap == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -250,7 +282,7 @@ good_area:
+ * "undefined". Of those that can be set, this is the only
+ * one which seems bad.
+ */
+- if (error_code & 0x10000000)
++ if (error_code & DSISR_GUARDED)
+ /* Guarded storage error. */
+ goto bad_area;
+ #endif /* CONFIG_8xx */
+@@ -265,7 +297,7 @@ good_area:
+ * processors use the same I/D cache coherency mechanism
+ * as embedded.
+ */
+- if (error_code & DSISR_PROTFAULT)
++ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
+ goto bad_area;
+ #endif /* CONFIG_PPC_STD_MMU */
+
+@@ -335,6 +367,23 @@ bad_area:
+ bad_area_nosemaphore:
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
++#else
++ if (is_exec && regs->nip == address) {
++#endif
++ switch (pax_handle_fetch_fault(regs)) {
++ }
++
++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ _exception(SIGSEGV, regs, code, address);
+ return 0;
+ }
+diff -urNp linux-2.6.31.7/arch/powerpc/mm/mmap_64.c linux-2.6.31.7/arch/powerpc/mm/mmap_64.c
+--- linux-2.6.31.7/arch/powerpc/mm/mmap_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/mm/mmap_64.c 2009-12-08 17:39:42.754738182 -0500
+@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -urNp linux-2.6.31.7/arch/powerpc/mm/slice.c linux-2.6.31.7/arch/powerpc/mm/slice.c
+--- linux-2.6.31.7/arch/powerpc/mm/slice.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/mm/slice.c 2009-12-08 17:39:42.754738182 -0500
+@@ -426,6 +426,11 @@ unsigned long slice_get_unmapped_area(un
+ if (fixed && addr > (mm->task_size - len))
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
++ addr = 0;
++#endif
++
+ /* If hint, make sure it matches our alignment restrictions */
+ if (!fixed && addr) {
+ addr = _ALIGN_UP(addr, 1ul << pshift);
+diff -urNp linux-2.6.31.7/arch/powerpc/platforms/cell/spufs/file.c linux-2.6.31.7/arch/powerpc/platforms/cell/spufs/file.c
+--- linux-2.6.31.7/arch/powerpc/platforms/cell/spufs/file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/platforms/cell/spufs/file.c 2009-12-08 17:39:42.755850611 -0500
+@@ -147,7 +147,7 @@ static int __fops ## _open(struct inode
+ __simple_attr_check_format(__fmt, 0ull); \
+ return spufs_attr_open(inode, file, __get, __set, __fmt); \
+ } \
+-static struct file_operations __fops = { \
++static const struct file_operations __fops = { \
+ .owner = THIS_MODULE, \
+ .open = __fops ## _open, \
+ .release = spufs_attr_release, \
+@@ -309,7 +309,7 @@ static int spufs_mem_mmap_access(struct
+ return len;
+ }
+
+-static struct vm_operations_struct spufs_mem_mmap_vmops = {
++static const struct vm_operations_struct spufs_mem_mmap_vmops = {
+ .fault = spufs_mem_mmap_fault,
+ .access = spufs_mem_mmap_access,
+ };
+@@ -436,7 +436,7 @@ static int spufs_cntl_mmap_fault(struct
+ return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
+ }
+
+-static struct vm_operations_struct spufs_cntl_mmap_vmops = {
++static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
+ .fault = spufs_cntl_mmap_fault,
+ };
+
+@@ -1143,7 +1143,7 @@ spufs_signal1_mmap_fault(struct vm_area_
+ #endif
+ }
+
+-static struct vm_operations_struct spufs_signal1_mmap_vmops = {
++static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
+ .fault = spufs_signal1_mmap_fault,
+ };
+
+@@ -1279,7 +1279,7 @@ spufs_signal2_mmap_fault(struct vm_area_
+ #endif
+ }
+
+-static struct vm_operations_struct spufs_signal2_mmap_vmops = {
++static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
+ .fault = spufs_signal2_mmap_fault,
+ };
+
+@@ -1397,7 +1397,7 @@ spufs_mss_mmap_fault(struct vm_area_stru
+ return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
+ }
+
+-static struct vm_operations_struct spufs_mss_mmap_vmops = {
++static const struct vm_operations_struct spufs_mss_mmap_vmops = {
+ .fault = spufs_mss_mmap_fault,
+ };
+
+@@ -1458,7 +1458,7 @@ spufs_psmap_mmap_fault(struct vm_area_st
+ return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
+ }
+
+-static struct vm_operations_struct spufs_psmap_mmap_vmops = {
++static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
+ .fault = spufs_psmap_mmap_fault,
+ };
+
+@@ -1517,7 +1517,7 @@ spufs_mfc_mmap_fault(struct vm_area_stru
+ return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
+ }
+
+-static struct vm_operations_struct spufs_mfc_mmap_vmops = {
++static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
+ .fault = spufs_mfc_mmap_fault,
+ };
+
+diff -urNp linux-2.6.31.7/arch/powerpc/platforms/pseries/dtl.c linux-2.6.31.7/arch/powerpc/platforms/pseries/dtl.c
+--- linux-2.6.31.7/arch/powerpc/platforms/pseries/dtl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/platforms/pseries/dtl.c 2009-12-08 17:39:42.755850611 -0500
+@@ -209,7 +209,7 @@ static ssize_t dtl_file_read(struct file
+ return n_read * sizeof(struct dtl_entry);
+ }
+
+-static struct file_operations dtl_fops = {
++static const struct file_operations dtl_fops = {
+ .open = dtl_file_open,
+ .release = dtl_file_release,
+ .read = dtl_file_read,
+diff -urNp linux-2.6.31.7/arch/powerpc/platforms/pseries/hvCall_inst.c linux-2.6.31.7/arch/powerpc/platforms/pseries/hvCall_inst.c
+--- linux-2.6.31.7/arch/powerpc/platforms/pseries/hvCall_inst.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/powerpc/platforms/pseries/hvCall_inst.c 2009-12-08 17:39:42.755850611 -0500
+@@ -71,7 +71,7 @@ static int hc_show(struct seq_file *m, v
+ return 0;
+ }
+
+-static struct seq_operations hcall_inst_seq_ops = {
++static const struct seq_operations hcall_inst_seq_ops = {
+ .start = hc_start,
+ .next = hc_next,
+ .stop = hc_stop,
+diff -urNp linux-2.6.31.7/arch/s390/hypfs/inode.c linux-2.6.31.7/arch/s390/hypfs/inode.c
+--- linux-2.6.31.7/arch/s390/hypfs/inode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/s390/hypfs/inode.c 2009-12-08 17:39:42.757147934 -0500
+@@ -41,7 +41,7 @@ struct hypfs_sb_info {
+
+ static const struct file_operations hypfs_file_ops;
+ static struct file_system_type hypfs_type;
+-static struct super_operations hypfs_s_ops;
++static const struct super_operations hypfs_s_ops;
+
+ /* start of list of all dentries, which have to be deleted on update */
+ static struct dentry *hypfs_last_dentry;
+@@ -476,7 +476,7 @@ static struct file_system_type hypfs_typ
+ .kill_sb = hypfs_kill_super
+ };
+
+-static struct super_operations hypfs_s_ops = {
++static const struct super_operations hypfs_s_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = hypfs_drop_inode,
+ .show_options = hypfs_show_options,
+diff -urNp linux-2.6.31.7/arch/s390/include/asm/atomic.h linux-2.6.31.7/arch/s390/include/asm/atomic.h
+--- linux-2.6.31.7/arch/s390/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/s390/include/asm/atomic.h 2009-12-08 17:39:42.757147934 -0500
+@@ -71,19 +71,31 @@ static inline int atomic_read(const atom
+ return v->counter;
+ }
+
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return atomic_read((const atomic_t *)v);
++}
++
+ static inline void atomic_set(atomic_t *v, int i)
+ {
+ v->counter = i;
+ barrier();
+ }
+
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ atomic_set((atomic_t *)v, i);
++}
++
+ static __inline__ int atomic_add_return(int i, atomic_t * v)
+ {
+ return __CS_LOOP(v, i, "ar");
+ }
+ #define atomic_add(_i, _v) atomic_add_return(_i, _v)
++#define atomic_add_unchecked(_i, _v) atomic_add((_i), (atomic_t *)(_v))
+ #define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
+ #define atomic_inc(_v) atomic_add_return(1, _v)
++#define atomic_inc_unchecked(_v) atomic_inc((atomic_t *)(_v))
+ #define atomic_inc_return(_v) atomic_add_return(1, _v)
+ #define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
+
+@@ -92,6 +104,7 @@ static __inline__ int atomic_sub_return(
+ return __CS_LOOP(v, i, "sr");
+ }
+ #define atomic_sub(_i, _v) atomic_sub_return(_i, _v)
++#define atomic_sub_unchecked(_i, _v) atomic_sub((_i), (atomic_t *)(_v))
+ #define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
+ #define atomic_dec(_v) atomic_sub_return(1, _v)
+ #define atomic_dec_return(_v) atomic_sub_return(1, _v)
+diff -urNp linux-2.6.31.7/arch/s390/include/asm/uaccess.h linux-2.6.31.7/arch/s390/include/asm/uaccess.h
+--- linux-2.6.31.7/arch/s390/include/asm/uaccess.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/s390/include/asm/uaccess.h 2009-12-08 17:39:42.758147961 -0500
+@@ -232,6 +232,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
+ static inline unsigned long __must_check
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n) && (n <= 256))
+ return uaccess.copy_from_user_small(n, from, to);
+ else
+@@ -283,6 +290,10 @@ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else
+diff -urNp linux-2.6.31.7/arch/s390/kernel/module.c linux-2.6.31.7/arch/s390/kernel/module.c
+--- linux-2.6.31.7/arch/s390/kernel/module.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/s390/kernel/module.c 2009-12-08 17:39:42.758147961 -0500
+@@ -164,11 +164,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
+
+ /* Increase core size by size of got & plt and set start
+ offsets for got and plt. */
+- me->core_size = ALIGN(me->core_size, 4);
+- me->arch.got_offset = me->core_size;
+- me->core_size += me->arch.got_size;
+- me->arch.plt_offset = me->core_size;
+- me->core_size += me->arch.plt_size;
++ me->core_size_rw = ALIGN(me->core_size_rw, 4);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += me->arch.got_size;
++ me->arch.plt_offset = me->core_size_rx;
++ me->core_size_rx += me->arch.plt_size;
+ return 0;
+ }
+
+@@ -254,7 +254,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ if (info->got_initialized == 0) {
+ Elf_Addr *gotent;
+
+- gotent = me->module_core + me->arch.got_offset +
++ gotent = me->module_core_rw + me->arch.got_offset +
+ info->got_offset;
+ *gotent = val;
+ info->got_initialized = 1;
+@@ -278,7 +278,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ else if (r_type == R_390_GOTENT ||
+ r_type == R_390_GOTPLTENT)
+ *(unsigned int *) loc =
+- (val + (Elf_Addr) me->module_core - loc) >> 1;
++ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
+ else if (r_type == R_390_GOT64 ||
+ r_type == R_390_GOTPLT64)
+ *(unsigned long *) loc = val;
+@@ -292,7 +292,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
+ if (info->plt_initialized == 0) {
+ unsigned int *ip;
+- ip = me->module_core + me->arch.plt_offset +
++ ip = me->module_core_rx + me->arch.plt_offset +
+ info->plt_offset;
+ #ifndef CONFIG_64BIT
+ ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
+@@ -317,7 +317,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ val - loc + 0xffffUL < 0x1ffffeUL) ||
+ (r_type == R_390_PLT32DBL &&
+ val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+- val = (Elf_Addr) me->module_core +
++ val = (Elf_Addr) me->module_core_rx +
+ me->arch.plt_offset +
+ info->plt_offset;
+ val += rela->r_addend - loc;
+@@ -339,7 +339,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
+ case R_390_GOTOFF64: /* 64 bit offset to GOT. */
+ val = val + rela->r_addend -
+- ((Elf_Addr) me->module_core + me->arch.got_offset);
++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
+ if (r_type == R_390_GOTOFF16)
+ *(unsigned short *) loc = val;
+ else if (r_type == R_390_GOTOFF32)
+@@ -349,7 +349,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ break;
+ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
+ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
+- val = (Elf_Addr) me->module_core + me->arch.got_offset +
++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
+ rela->r_addend - loc;
+ if (r_type == R_390_GOTPC)
+ *(unsigned int *) loc = val;
+diff -urNp linux-2.6.31.7/arch/sh/include/asm/atomic.h linux-2.6.31.7/arch/sh/include/asm/atomic.h
+--- linux-2.6.31.7/arch/sh/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sh/include/asm/atomic.h 2009-12-08 17:39:42.758147961 -0500
+@@ -14,7 +14,9 @@
+ #define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
+
+ #define atomic_read(v) ((v)->counter)
++#define atomic_read_unchecked(v) ((v)->counter)
+ #define atomic_set(v,i) ((v)->counter = (i))
++#define atomic_set_unchecked(v,i) ((v)->counter = (i))
+
+ #if defined(CONFIG_GUSA_RB)
+ #include <asm/atomic-grb.h>
+@@ -43,6 +45,9 @@
+ #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+ #define atomic_inc(v) atomic_add(1,(v))
++#define atomic_inc_unchecked(v) atomic_inc((atomic_t *)(v))
++#define atomic_add_unchecked(i,v) atomic_add((i),(atomic_t *)(v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i),(atomic_t *)(v))
+ #define atomic_dec(v) atomic_sub(1,(v))
+
+ #if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A)
+diff -urNp linux-2.6.31.7/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.31.7/arch/sh/kernel/cpu/sh4/sq.c
+--- linux-2.6.31.7/arch/sh/kernel/cpu/sh4/sq.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sh/kernel/cpu/sh4/sq.c 2009-12-08 17:39:42.759148003 -0500
+@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
+ NULL,
+ };
+
+-static struct sysfs_ops sq_sysfs_ops = {
++static const struct sysfs_ops sq_sysfs_ops = {
+ .show = sq_sysfs_show,
+ .store = sq_sysfs_store,
+ };
+diff -urNp linux-2.6.31.7/arch/sparc/include/asm/atomic_32.h linux-2.6.31.7/arch/sparc/include/asm/atomic_32.h
+--- linux-2.6.31.7/arch/sparc/include/asm/atomic_32.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/include/asm/atomic_32.h 2009-12-08 17:39:42.759148003 -0500
+@@ -24,12 +24,17 @@ extern int atomic_cmpxchg(atomic_t *, in
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+ extern int atomic_add_unless(atomic_t *, int, int);
+ extern void atomic_set(atomic_t *, int);
++extern void atomic_set_unchecked(atomic_unchecked_t *, int);
+
+ #define atomic_read(v) ((v)->counter)
++#define atomic_read_unchecked(v) ((v)->counter)
+
+ #define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
++#define atomic_add_unchecked(i, v) atomic_add((i), (atomic_t *)(v))
+ #define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (atomic_t *)(v))
+ #define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
++#define atomic_inc_unchecked(v) atomic_inc((atomic_t *)(v))
+ #define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
+
+ #define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
+diff -urNp linux-2.6.31.7/arch/sparc/include/asm/atomic_64.h linux-2.6.31.7/arch/sparc/include/asm/atomic_64.h
+--- linux-2.6.31.7/arch/sparc/include/asm/atomic_64.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/include/asm/atomic_64.h 2009-12-08 17:39:42.760124181 -0500
+@@ -14,14 +14,18 @@
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) ((v)->counter)
++#define atomic_read_unchecked(v) ((v)->counter)
+ #define atomic64_read(v) ((v)->counter)
+
+ #define atomic_set(v, i) (((v)->counter) = i)
++#define atomic_set_unchecked(v, i) (((v)->counter) = i)
+ #define atomic64_set(v, i) (((v)->counter) = i)
+
+ extern void atomic_add(int, atomic_t *);
++extern void atomic_add_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_add(int, atomic64_t *);
+ extern void atomic_sub(int, atomic_t *);
++extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_sub(int, atomic64_t *);
+
+ extern int atomic_add_ret(int, atomic_t *);
+@@ -59,6 +63,7 @@ extern int atomic64_sub_ret(int, atomic6
+ #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++#define atomic_inc_unchecked(v) atomic_add_unchecked(1, v)
+ #define atomic64_inc(v) atomic64_add(1, v)
+
+ #define atomic_dec(v) atomic_sub(1, v)
+@@ -72,17 +77,28 @@ extern int atomic64_sub_ret(int, atomic6
+
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%icc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+@@ -93,17 +109,28 @@ static inline int atomic_add_unless(atom
+
+ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%xcc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+diff -urNp linux-2.6.31.7/arch/sparc/include/asm/elf_32.h linux-2.6.31.7/arch/sparc/include/asm/elf_32.h
+--- linux-2.6.31.7/arch/sparc/include/asm/elf_32.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/include/asm/elf_32.h 2009-12-08 17:39:42.760124181 -0500
+@@ -116,6 +116,13 @@ typedef struct {
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff -urNp linux-2.6.31.7/arch/sparc/include/asm/elf_64.h linux-2.6.31.7/arch/sparc/include/asm/elf_64.h
+--- linux-2.6.31.7/arch/sparc/include/asm/elf_64.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/include/asm/elf_64.h 2009-12-08 17:39:42.760882486 -0500
+@@ -163,6 +163,12 @@ typedef struct {
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28 )
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29 )
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+diff -urNp linux-2.6.31.7/arch/sparc/include/asm/pgtable_32.h linux-2.6.31.7/arch/sparc/include/asm/pgtable_32.h
+--- linux-2.6.31.7/arch/sparc/include/asm/pgtable_32.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/include/asm/pgtable_32.h 2009-12-08 17:39:42.760882486 -0500
+@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
+ BTFIXUPDEF_INT(page_none)
+ BTFIXUPDEF_INT(page_copy)
+ BTFIXUPDEF_INT(page_readonly)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++BTFIXUPDEF_INT(page_shared_noexec)
++BTFIXUPDEF_INT(page_copy_noexec)
++BTFIXUPDEF_INT(page_readonly_noexec)
++#endif
++
+ BTFIXUPDEF_INT(page_kernel)
+
+ #define PMD_SHIFT SUN4C_PMD_SHIFT
+@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
+ #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
+ #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++extern pgprot_t PAGE_SHARED_NOEXEC;
++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ extern unsigned long page_kernel;
+
+ #ifdef MODULE
+diff -urNp linux-2.6.31.7/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.31.7/arch/sparc/include/asm/pgtsrmmu.h
+--- linux-2.6.31.7/arch/sparc/include/asm/pgtsrmmu.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/include/asm/pgtsrmmu.h 2009-12-08 17:39:42.761807329 -0500
+@@ -115,6 +115,13 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#endif
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff -urNp linux-2.6.31.7/arch/sparc/include/asm/spinlock_64.h linux-2.6.31.7/arch/sparc/include/asm/spinlock_64.h
+--- linux-2.6.31.7/arch/sparc/include/asm/spinlock_64.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/include/asm/spinlock_64.h 2009-12-08 17:39:42.761807329 -0500
+@@ -99,7 +99,12 @@ static void inline __read_lock(raw_rwloc
+ __asm__ __volatile__ (
+ "1: ldsw [%2], %0\n"
+ " brlz,pn %0, 2f\n"
+-"4: add %0, 1, %1\n"
++"4: addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -112,7 +117,7 @@ static void inline __read_lock(raw_rwloc
+ " .previous"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (lock)
+- : "memory");
++ : "memory", "cc");
+ }
+
+ static int inline __read_trylock(raw_rwlock_t *lock)
+@@ -123,7 +128,12 @@ static int inline __read_trylock(raw_rwl
+ "1: ldsw [%2], %0\n"
+ " brlz,a,pn %0, 2f\n"
+ " mov 0, %0\n"
+-" add %0, 1, %1\n"
++" addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -142,7 +152,12 @@ static void inline __read_unlock(raw_rwl
+
+ __asm__ __volatile__(
+ "1: lduw [%2], %0\n"
+-" sub %0, 1, %1\n"
++" subcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%xcc, 1b\n"
+diff -urNp linux-2.6.31.7/arch/sparc/include/asm/uaccess_32.h linux-2.6.31.7/arch/sparc/include/asm/uaccess_32.h
+--- linux-2.6.31.7/arch/sparc/include/asm/uaccess_32.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/include/asm/uaccess_32.h 2009-12-08 17:39:42.761807329 -0500
+@@ -249,27 +249,49 @@ extern unsigned long __copy_user(void __
+
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) to, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_user(to, (__force void __user *) from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_user(to, (__force void __user *) from, n);
+ }
+
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) from, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_user((__force void __user *) to, from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
+ return __copy_user((__force void __user *) to, from, n);
+ }
+
+diff -urNp linux-2.6.31.7/arch/sparc/include/asm/uaccess_64.h linux-2.6.31.7/arch/sparc/include/asm/uaccess_64.h
+--- linux-2.6.31.7/arch/sparc/include/asm/uaccess_64.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/include/asm/uaccess_64.h 2009-12-08 17:39:42.761807329 -0500
+@@ -212,7 +212,15 @@ extern unsigned long copy_from_user_fixu
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_from_user(to, from, size);
++ unsigned long ret;
++
++ if (unlikely(((long)size > INT_MAX) || ((long)size < 0)))
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(to, size, false);
++
++ ret = ___copy_from_user(to, from, size);
+
+ if (unlikely(ret))
+ ret = copy_from_user_fixup(to, from, size);
+@@ -228,7 +236,15 @@ extern unsigned long copy_to_user_fixup(
+ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_to_user(to, from, size);
++ unsigned long ret;
++
++ if (unlikely(((long)size > INT_MAX) || ((long)size < 0)))
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(from, size, true);
++
++ ret = ___copy_to_user(to, from, size);
+
+ if (unlikely(ret))
+ ret = copy_to_user_fixup(to, from, size);
+diff -urNp linux-2.6.31.7/arch/sparc/kernel/Makefile linux-2.6.31.7/arch/sparc/kernel/Makefile
+--- linux-2.6.31.7/arch/sparc/kernel/Makefile 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/kernel/Makefile 2009-12-08 17:39:42.763148734 -0500
+@@ -3,7 +3,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ extra-y := head_$(BITS).o
+ extra-y += init_task.o
+diff -urNp linux-2.6.31.7/arch/sparc/kernel/sys_sparc_32.c linux-2.6.31.7/arch/sparc/kernel/sys_sparc_32.c
+--- linux-2.6.31.7/arch/sparc/kernel/sys_sparc_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/kernel/sys_sparc_32.c 2009-12-08 17:39:42.763148734 -0500
+@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
+ if (ARCH_SUN4C && len > 0x20000000)
+ return -ENOMEM;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr);
+diff -urNp linux-2.6.31.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.31.7/arch/sparc/kernel/sys_sparc_64.c
+--- linux-2.6.31.7/arch/sparc/kernel/sys_sparc_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/kernel/sys_sparc_64.c 2009-12-08 17:39:42.764148100 -0500
+@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -153,9 +157,9 @@ unsigned long arch_get_unmapped_area(str
+ }
+
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ }
+
+@@ -175,8 +179,8 @@ full_search:
+ vma = find_vma(mm, VA_EXCLUDE_END);
+ }
+ if (unlikely(task_size < addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+@@ -216,7 +220,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -380,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
+ current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
+ sysctl_legacy_va_layout) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+@@ -394,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
+ gap = (task_size / 6 * 5);
+
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -urNp linux-2.6.31.7/arch/sparc/kernel/traps_64.c linux-2.6.31.7/arch/sparc/kernel/traps_64.c
+--- linux-2.6.31.7/arch/sparc/kernel/traps_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/kernel/traps_64.c 2009-12-08 17:39:42.764777275 -0500
+@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
+
+ lvl -= 0x100;
+ if (regs->tstate & TSTATE_PRIV) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+ die_if_kernel(buffer, regs);
+ }
+@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
+ void bad_trap_tl1(struct pt_regs *regs, long lvl)
+ {
+ char buffer[32];
+-
++
+ if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+ 0, lvl, SIGTRAP) == NOTIFY_STOP)
+ return;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+diff -urNp linux-2.6.31.7/arch/sparc/lib/atomic32.c linux-2.6.31.7/arch/sparc/lib/atomic32.c
+--- linux-2.6.31.7/arch/sparc/lib/atomic32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/lib/atomic32.c 2009-12-08 17:39:42.765659318 -0500
+@@ -80,6 +80,12 @@ void atomic_set(atomic_t *v, int i)
+ }
+ EXPORT_SYMBOL(atomic_set);
+
++void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ atomic_set((atomic_t *)v, i);
++}
++EXPORT_SYMBOL(atomic_set_unchecked);
++
+ unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
+ {
+ unsigned long old, flags;
+diff -urNp linux-2.6.31.7/arch/sparc/lib/atomic_64.S linux-2.6.31.7/arch/sparc/lib/atomic_64.S
+--- linux-2.6.31.7/arch/sparc/lib/atomic_64.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/lib/atomic_64.S 2009-12-08 17:39:42.765659318 -0500
+@@ -18,7 +18,12 @@
+ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add, .-atomic_add
+
++ .globl atomic_add_unchecked
++ .type atomic_add_unchecked,#function
++atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ add %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_unchecked, .-atomic_add_unchecked
++
+ .globl atomic_sub
+ .type atomic_sub,#function
+ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_sub, .-atomic_sub
+
++ .globl atomic_sub_unchecked
++ .type atomic_sub_unchecked,#function
++atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ sub %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_sub_unchecked, .-atomic_sub_unchecked
++
+ .globl atomic_add_ret
+ .type atomic_add_ret,#function
+ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+@@ -64,7 +109,12 @@ atomic_add_ret: /* %o0 = increment, %o1
+ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+@@ -80,7 +130,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
+ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+@@ -95,7 +150,12 @@ atomic64_add: /* %o0 = increment, %o1 =
+ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+@@ -110,7 +170,12 @@ atomic64_sub: /* %o0 = decrement, %o1 =
+ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+@@ -126,7 +191,12 @@ atomic64_add_ret: /* %o0 = increment, %o
+ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+diff -urNp linux-2.6.31.7/arch/sparc/lib/ksyms.c linux-2.6.31.7/arch/sparc/lib/ksyms.c
+--- linux-2.6.31.7/arch/sparc/lib/ksyms.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/lib/ksyms.c 2009-12-08 17:39:42.765659318 -0500
+@@ -144,8 +144,10 @@ EXPORT_SYMBOL(__downgrade_write);
+
+ /* Atomic counter implementation. */
+ EXPORT_SYMBOL(atomic_add);
++EXPORT_SYMBOL(atomic_add_unchecked);
+ EXPORT_SYMBOL(atomic_add_ret);
+ EXPORT_SYMBOL(atomic_sub);
++EXPORT_SYMBOL(atomic_sub_unchecked);
+ EXPORT_SYMBOL(atomic_sub_ret);
+ EXPORT_SYMBOL(atomic64_add);
+ EXPORT_SYMBOL(atomic64_add_ret);
+diff -urNp linux-2.6.31.7/arch/sparc/lib/rwsem_64.S linux-2.6.31.7/arch/sparc/lib/rwsem_64.S
+--- linux-2.6.31.7/arch/sparc/lib/rwsem_64.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/lib/rwsem_64.S 2009-12-08 17:39:42.766796598 -0500
+@@ -11,7 +11,12 @@
+ .globl __down_read
+ __down_read:
+ 1: lduw [%o0], %g1
+- add %g1, 1, %g7
++ addcc %g1, 1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 1b
+@@ -33,7 +38,12 @@ __down_read:
+ .globl __down_read_trylock
+ __down_read_trylock:
+ 1: lduw [%o0], %g1
+- add %g1, 1, %g7
++ addcc %g1, 1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cmp %g7, 0
+ bl,pn %icc, 2f
+ mov 0, %o1
+@@ -51,7 +61,12 @@ __down_write:
+ or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+ 1:
+ lduw [%o0], %g3
+- add %g3, %g1, %g7
++ addcc %g3, %g1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g3, %g7
+ cmp %g3, %g7
+ bne,pn %icc, 1b
+@@ -77,7 +92,12 @@ __down_write_trylock:
+ cmp %g3, 0
+ bne,pn %icc, 2f
+ mov 0, %o1
+- add %g3, %g1, %g7
++ addcc %g3, %g1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g3, %g7
+ cmp %g3, %g7
+ bne,pn %icc, 1b
+@@ -90,7 +110,12 @@ __down_write_trylock:
+ __up_read:
+ 1:
+ lduw [%o0], %g1
+- sub %g1, 1, %g7
++ subcc %g1, 1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 1b
+@@ -118,7 +143,12 @@ __up_write:
+ or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+ 1:
+ lduw [%o0], %g3
+- sub %g3, %g1, %g7
++ subcc %g3, %g1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g3, %g7
+ cmp %g3, %g7
+ bne,pn %icc, 1b
+@@ -143,7 +173,12 @@ __downgrade_write:
+ or %g1, %lo(RWSEM_WAITING_BIAS), %g1
+ 1:
+ lduw [%o0], %g3
+- sub %g3, %g1, %g7
++ subcc %g3, %g1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g3, %g7
+ cmp %g3, %g7
+ bne,pn %icc, 1b
+diff -urNp linux-2.6.31.7/arch/sparc/Makefile linux-2.6.31.7/arch/sparc/Makefile
+--- linux-2.6.31.7/arch/sparc/Makefile 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/Makefile 2009-12-08 17:39:42.759148003 -0500
+@@ -77,7 +77,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
+ # Export what is needed by arch/sparc/boot/Makefile
+ export VMLINUX_INIT VMLINUX_MAIN
+ VMLINUX_INIT := $(head-y) $(init-y)
+-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+ VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
+ VMLINUX_MAIN += $(drivers-y) $(net-y)
+
+diff -urNp linux-2.6.31.7/arch/sparc/mm/fault_32.c linux-2.6.31.7/arch/sparc/mm/fault_32.c
+--- linux-2.6.31.7/arch/sparc/mm/fault_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/mm/fault_32.c 2009-12-08 17:39:42.767863335 -0500
+@@ -21,6 +21,9 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/kdebug.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/system.h>
+ #include <asm/page.h>
+@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
+ return safe_compute_effective_address(regs, insn);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->pc);
++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->pc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned int addr;
++
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(ba, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->pc-4));
++ err |= get_user(call, (unsigned int *)regs->pc);
++ err |= get_user(nop, (unsigned int *)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+ unsigned long address)
+ {
+@@ -231,6 +495,24 @@ good_area:
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+diff -urNp linux-2.6.31.7/arch/sparc/mm/fault_64.c linux-2.6.31.7/arch/sparc/mm/fault_64.c
+--- linux-2.6.31.7/arch/sparc/mm/fault_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/mm/fault_64.c 2009-12-08 17:39:42.768709349 -0500
+@@ -20,6 +20,9 @@
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
+ #include <linux/percpu.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -249,6 +252,416 @@ static void noinline bogus_32bit_fault_a
+ show_regs(regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->tpc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned long addr;
++
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int mov1, call, mov2;
++
++ err = get_user(mov1, (unsigned int *)regs->tpc);
++ err |= get_user(call, (unsigned int *)(regs->tpc+4));
++ err |= get_user(mov2, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if (mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(or1, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or2, (unsigned int *)(regs->tpc+12));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020 &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or, (unsigned int *)(regs->tpc+12));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+16));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+20));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020 &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->tpc-4));
++ err |= get_user(call, (unsigned int *)regs->tpc);
++ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ dl_resolve &= 0xFFFFFFFFUL;
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (ba & 0xFFF00000U) == 0x30600000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -315,6 +728,29 @@ asmlinkage void __kprobes do_sparc64_fau
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
+diff -urNp linux-2.6.31.7/arch/sparc/mm/init_32.c linux-2.6.31.7/arch/sparc/mm/init_32.c
+--- linux-2.6.31.7/arch/sparc/mm/init_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/mm/init_32.c 2009-12-08 17:39:42.768709349 -0500
+@@ -316,6 +316,9 @@ extern void device_scan(void);
+ pgprot_t PAGE_SHARED __read_mostly;
+ EXPORT_SYMBOL(PAGE_SHARED);
+
++pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
++EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
++
+ void __init paging_init(void)
+ {
+ switch(sparc_cpu_model) {
+@@ -341,17 +344,17 @@ void __init paging_init(void)
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+- protection_map[1] = PAGE_READONLY;
+- protection_map[2] = PAGE_COPY;
+- protection_map[3] = PAGE_COPY;
++ protection_map[1] = PAGE_READONLY_NOEXEC;
++ protection_map[2] = PAGE_COPY_NOEXEC;
++ protection_map[3] = PAGE_COPY_NOEXEC;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+- protection_map[9] = PAGE_READONLY;
+- protection_map[10] = PAGE_SHARED;
+- protection_map[11] = PAGE_SHARED;
++ protection_map[9] = PAGE_READONLY_NOEXEC;
++ protection_map[10] = PAGE_SHARED_NOEXEC;
++ protection_map[11] = PAGE_SHARED_NOEXEC;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
+diff -urNp linux-2.6.31.7/arch/sparc/mm/Makefile linux-2.6.31.7/arch/sparc/mm/Makefile
+--- linux-2.6.31.7/arch/sparc/mm/Makefile 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/mm/Makefile 2009-12-08 17:39:42.766796598 -0500
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
+ obj-y += fault_$(BITS).o
+diff -urNp linux-2.6.31.7/arch/sparc/mm/srmmu.c linux-2.6.31.7/arch/sparc/mm/srmmu.c
+--- linux-2.6.31.7/arch/sparc/mm/srmmu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/sparc/mm/srmmu.c 2009-12-08 17:39:42.776967880 -0500
+@@ -2149,6 +2149,13 @@ void __init ld_mmu_srmmu(void)
+ PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
++#endif
++
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
+ page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+
+diff -urNp linux-2.6.31.7/arch/um/include/asm/kmap_types.h linux-2.6.31.7/arch/um/include/asm/kmap_types.h
+--- linux-2.6.31.7/arch/um/include/asm/kmap_types.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/um/include/asm/kmap_types.h 2009-12-08 17:39:42.776967880 -0500
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.31.7/arch/um/include/asm/page.h linux-2.6.31.7/arch/um/include/asm/page.h
+--- linux-2.6.31.7/arch/um/include/asm/page.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/um/include/asm/page.h 2009-12-08 17:39:42.778150758 -0500
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ struct page;
+diff -urNp linux-2.6.31.7/arch/um/sys-i386/syscalls.c linux-2.6.31.7/arch/um/sys-i386/syscalls.c
+--- linux-2.6.31.7/arch/um/sys-i386/syscalls.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/um/sys-i386/syscalls.c 2009-12-08 17:39:42.778150758 -0500
+@@ -11,6 +11,21 @@
+ #include "asm/uaccess.h"
+ #include "asm/unistd.h"
+
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
++{
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size || addr > pax_task_size - len)
++ return -EINVAL;
++
++ return 0;
++}
++
+ /*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls. Linux/i386 didn't use to be able to handle more than
+diff -urNp linux-2.6.31.7/arch/x86/boot/bitops.h linux-2.6.31.7/arch/x86/boot/bitops.h
+--- linux-2.6.31.7/arch/x86/boot/bitops.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/boot/bitops.h 2009-12-08 17:39:42.793644779 -0500
+@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
+ u8 v;
+ const u32 *p = (const u32 *)addr;
+
+- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ return v;
+ }
+
+@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
+
+ static inline void set_bit(int nr, void *addr)
+ {
+- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+
+ #endif /* BOOT_BITOPS_H */
+diff -urNp linux-2.6.31.7/arch/x86/boot/boot.h linux-2.6.31.7/arch/x86/boot/boot.h
+--- linux-2.6.31.7/arch/x86/boot/boot.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/boot/boot.h 2009-12-08 17:39:42.794961666 -0500
+@@ -82,7 +82,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+ u16 seg;
+- asm("movw %%ds,%0" : "=rm" (seg));
++ asm volatile("movw %%ds,%0" : "=rm" (seg));
+ return seg;
+ }
+
+@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
+ static inline int memcmp(const void *s1, const void *s2, size_t len)
+ {
+ u8 diff;
+- asm("repe; cmpsb; setnz %0"
++ asm volatile("repe; cmpsb; setnz %0"
+ : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ return diff;
+ }
+diff -urNp linux-2.6.31.7/arch/x86/boot/compressed/head_32.S linux-2.6.31.7/arch/x86/boot/compressed/head_32.S
+--- linux-2.6.31.7/arch/x86/boot/compressed/head_32.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/boot/compressed/head_32.S 2009-12-08 17:39:42.794961666 -0500
+@@ -75,7 +75,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -148,7 +148,7 @@ relocated:
+ * and where it was actually loaded.
+ */
+ movl %ebp, %ebx
+- subl $LOAD_PHYSICAL_ADDR, %ebx
++ subl $____LOAD_PHYSICAL_ADDR, %ebx
+ jz 2f /* Nothing to be done if loaded at compiled addr. */
+ /*
+ * Process relocations.
+@@ -156,8 +156,7 @@ relocated:
+
+ 1: subl $4, %edi
+ movl (%edi), %ecx
+- testl %ecx, %ecx
+- jz 2f
++ jecxz 2f
+ addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+ jmp 1b
+ 2:
+diff -urNp linux-2.6.31.7/arch/x86/boot/compressed/head_64.S linux-2.6.31.7/arch/x86/boot/compressed/head_64.S
+--- linux-2.6.31.7/arch/x86/boot/compressed/head_64.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/boot/compressed/head_64.S 2009-12-08 17:39:42.794961666 -0500
+@@ -90,7 +90,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -233,7 +233,7 @@ ENTRY(startup_64)
+ notq %rax
+ andq %rax, %rbp
+ #else
+- movq $LOAD_PHYSICAL_ADDR, %rbp
++ movq $____LOAD_PHYSICAL_ADDR, %rbp
+ #endif
+
+ /* Target address to relocate to for decompression */
+diff -urNp linux-2.6.31.7/arch/x86/boot/compressed/misc.c linux-2.6.31.7/arch/x86/boot/compressed/misc.c
+--- linux-2.6.31.7/arch/x86/boot/compressed/misc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/boot/compressed/misc.c 2009-12-08 17:39:42.795841775 -0500
+@@ -288,7 +288,7 @@ static void parse_elf(void *output)
+ case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+ dest = output;
+- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+ dest = (void *)(phdr->p_paddr);
+ #endif
+@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
+ error("Destination address too large");
+ #endif
+ #ifndef CONFIG_RELOCATABLE
+- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
+ error("Wrong destination address");
+ #endif
+
+diff -urNp linux-2.6.31.7/arch/x86/boot/compressed/mkpiggy.c linux-2.6.31.7/arch/x86/boot/compressed/mkpiggy.c
+--- linux-2.6.31.7/arch/x86/boot/compressed/mkpiggy.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/boot/compressed/mkpiggy.c 2009-12-08 17:39:42.795841775 -0500
+@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
+
+ offs = (olen > ilen) ? olen - ilen : 0;
+ offs += olen >> 12; /* Add 8 bytes for each 32K block */
+- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
++ offs += 64*1024; /* Add 64K bytes slack */
+ offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
+
+ printf(".section \".rodata.compressed\",\"a\",@progbits\n");
+diff -urNp linux-2.6.31.7/arch/x86/boot/compressed/relocs.c linux-2.6.31.7/arch/x86/boot/compressed/relocs.c
+--- linux-2.6.31.7/arch/x86/boot/compressed/relocs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/boot/compressed/relocs.c 2009-12-08 17:39:42.795841775 -0500
+@@ -10,8 +10,11 @@
+ #define USE_BSD
+ #include <endian.h>
+
++#include "../../../../include/linux/autoconf.h"
++
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ static Elf32_Ehdr ehdr;
++static Elf32_Phdr *phdr;
+ static unsigned long reloc_count, reloc_idx;
+ static unsigned long *relocs;
+
+@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
+
+ static int is_safe_abs_reloc(const char* sym_name)
+ {
+- int i;
++ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
+ if (!strcmp(sym_name, safe_abs_relocs[i]))
+@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
+ }
+ }
+
++static void read_phdrs(FILE *fp)
++{
++ unsigned int i;
++
++ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
++ if (!phdr) {
++ die("Unable to allocate %d program headers\n",
++ ehdr.e_phnum);
++ }
++ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
++ die("Seek to %d failed: %s\n",
++ ehdr.e_phoff, strerror(errno));
++ }
++ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
++ die("Cannot read ELF program headers: %s\n",
++ strerror(errno));
++ }
++ for(i = 0; i < ehdr.e_phnum; i++) {
++ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
++ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
++ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
++ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
++ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
++ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
++ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
++ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
++ }
++
++}
++
+ static void read_shdrs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ Elf32_Shdr shdr;
+
+ secs = calloc(ehdr.e_shnum, sizeof(struct section));
+@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
+
+ static void read_strtabs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_STRTAB) {
+@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
+
+ static void read_symtabs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
+
+ static void read_relocs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
++ uint32_t base;
++
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
+ die("Cannot read symbol table: %s\n",
+ strerror(errno));
+ }
++ base = 0;
++ for (j = 0; j < ehdr.e_phnum; j++) {
++ if (phdr[j].p_type != PT_LOAD )
++ continue;
++ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
++ continue;
++ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
++ break;
++ }
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
+ Elf32_Rel *rel = &sec->reltab[j];
+- rel->r_offset = elf32_to_cpu(rel->r_offset);
++ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
+ rel->r_info = elf32_to_cpu(rel->r_info);
+ }
+ }
+@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
+
+ static void print_absolute_symbols(void)
+ {
+- int i;
++ unsigned int i;
+ printf("Absolute symbols\n");
+ printf(" Num: Value Size Type Bind Visibility Name\n");
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+ continue;
+@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
+
+ static void print_absolute_relocs(void)
+ {
+- int i, printed = 0;
++ unsigned int i, printed = 0;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ struct section *sec_applies, *sec_symtab;
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+ if (sec->shdr.sh_type != SHT_REL) {
+ continue;
+ }
+@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
+
+ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+ {
+- int i;
++ unsigned int i;
+ /* Walk through the relocations */
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+ struct section *sec_applies, *sec_symtab;
+- int j;
++ unsigned int j;
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
+ if (sym->st_shndx == SHN_ABS) {
+ continue;
+ }
++ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
++ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
++ continue;
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
++ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
++ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
++ continue;
++#endif
+ if (r_type == R_386_NONE || r_type == R_386_PC32) {
+ /*
+ * NONE can be ignored and and PC relative
+@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
+
+ static void emit_relocs(int as_text)
+ {
+- int i;
++ unsigned int i;
+ /* Count how many relocations I have and allocate space for them. */
+ reloc_count = 0;
+ walk_relocs(count_reloc);
+@@ -634,6 +693,7 @@ int main(int argc, char **argv)
+ fname, strerror(errno));
+ }
+ read_ehdr(fp);
++ read_phdrs(fp);
+ read_shdrs(fp);
+ read_strtabs(fp);
+ read_symtabs(fp);
+diff -urNp linux-2.6.31.7/arch/x86/boot/cpucheck.c linux-2.6.31.7/arch/x86/boot/cpucheck.c
+--- linux-2.6.31.7/arch/x86/boot/cpucheck.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/boot/cpucheck.c 2009-12-08 17:39:42.796995512 -0500
+@@ -74,7 +74,7 @@ static int has_fpu(void)
+ u16 fcw = -1, fsw = -1;
+ u32 cr0;
+
+- asm("movl %%cr0,%0" : "=r" (cr0));
++ asm volatile("movl %%cr0,%0" : "=r" (cr0));
+ if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
+ cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
+ asm volatile("movl %0,%%cr0" : : "r" (cr0));
+@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
+ {
+ u32 f0, f1;
+
+- asm("pushfl ; "
++ asm volatile("pushfl ; "
+ "pushfl ; "
+ "popl %0 ; "
+ "movl %0,%1 ; "
+@@ -115,7 +115,7 @@ static void get_flags(void)
+ set_bit(X86_FEATURE_FPU, cpu.flags);
+
+ if (has_eflag(X86_EFLAGS_ID)) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_intel_level),
+ "=b" (cpu_vendor[0]),
+ "=d" (cpu_vendor[1]),
+@@ -124,7 +124,7 @@ static void get_flags(void)
+
+ if (max_intel_level >= 0x00000001 &&
+ max_intel_level <= 0x0000ffff) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (tfms),
+ "=c" (cpu.flags[4]),
+ "=d" (cpu.flags[0])
+@@ -136,7 +136,7 @@ static void get_flags(void)
+ cpu.model += ((tfms >> 16) & 0xf) << 4;
+ }
+
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_amd_level)
+ : "a" (0x80000000)
+ : "ebx", "ecx", "edx");
+@@ -144,7 +144,7 @@ static void get_flags(void)
+ if (max_amd_level >= 0x80000001 &&
+ max_amd_level <= 0x8000ffff) {
+ u32 eax = 0x80000001;
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "+a" (eax),
+ "=c" (cpu.flags[6]),
+ "=d" (cpu.flags[1])
+@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 ecx = MSR_K7_HWCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax &= ~(1 << 15);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ get_flags(); /* Make sure it really did something */
+ err = check_flags();
+@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 ecx = MSR_VIA_FCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax |= (1<<1)|(1<<7);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ set_bit(X86_FEATURE_CX8, cpu.flags);
+ err = check_flags();
+@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 eax, edx;
+ u32 level = 1;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+- asm("cpuid"
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
++ asm volatile("cpuid"
+ : "+a" (level), "=d" (cpu.flags[0])
+ : : "ecx", "ebx");
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ err = check_flags();
+ }
+diff -urNp linux-2.6.31.7/arch/x86/boot/header.S linux-2.6.31.7/arch/x86/boot/header.S
+--- linux-2.6.31.7/arch/x86/boot/header.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/boot/header.S 2009-12-08 17:39:42.797554746 -0500
+@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
+ # single linked list of
+ # struct setup_data
+
+-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
+
+ #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
+ #define VO_INIT_SIZE (VO__end - VO__text)
+diff -urNp linux-2.6.31.7/arch/x86/boot/video-vesa.c linux-2.6.31.7/arch/x86/boot/video-vesa.c
+--- linux-2.6.31.7/arch/x86/boot/video-vesa.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/boot/video-vesa.c 2009-12-08 17:39:42.797554746 -0500
+@@ -205,6 +205,7 @@ static void vesa_store_pm_info(void)
+
+ boot_params.screen_info.vesapm_seg = oreg.es;
+ boot_params.screen_info.vesapm_off = oreg.di;
++ boot_params.screen_info.vesapm_size = oreg.cx;
+ }
+
+ /*
+diff -urNp linux-2.6.31.7/arch/x86/ia32/ia32_signal.c linux-2.6.31.7/arch/x86/ia32/ia32_signal.c
+--- linux-2.6.31.7/arch/x86/ia32/ia32_signal.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/ia32/ia32_signal.c 2009-12-08 17:39:42.797554746 -0500
+@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
+ sp -= frame_size;
+ /* Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ return (void __user *) sp;
+ }
+
+@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
+ 0xb8,
+ __NR_ia32_rt_sigreturn,
+ 0x80cd,
+- 0,
++ 0
+ };
+
+ frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/alternative.h linux-2.6.31.7/arch/x86/include/asm/alternative.h
+--- linux-2.6.31.7/arch/x86/include/asm/alternative.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/alternative.h 2009-12-08 17:39:42.797554746 -0500
+@@ -87,7 +87,7 @@ const unsigned char *const *find_nop_tab
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+- ".section .altinstr_replacement, \"ax\"\n" \
++ ".section .altinstr_replacement, \"a\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous"
+
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/apm.h linux-2.6.31.7/arch/x86/include/asm/apm.h
+--- linux-2.6.31.7/arch/x86/include/asm/apm.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/apm.h 2009-12-08 17:39:42.798912490 -0500
+@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/atomic_32.h linux-2.6.31.7/arch/x86/include/asm/atomic_32.h
+--- linux-2.6.31.7/arch/x86/include/asm/atomic_32.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/atomic_32.h 2009-12-08 17:39:42.799652344 -0500
+@@ -25,6 +25,17 @@ static inline int atomic_read(const atom
+ }
+
+ /**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
++
++/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
+ }
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
+ */
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
+ */
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %1,%0\n"
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_sub_unchecked - subtract integer from atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %2,%0\n"
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -91,7 +167,30 @@ static inline int atomic_sub_and_test(in
+ */
+ static inline void atomic_inc(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "into\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "decl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -103,7 +202,18 @@ static inline void atomic_inc(atomic_t *
+ */
+ static inline void atomic_dec(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "into\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (v->counter));
+ }
+
+@@ -119,7 +229,19 @@ static inline int atomic_dec_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decl %0; sete %1"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "into\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -137,7 +259,19 @@ static inline int atomic_inc_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incl %0; sete %1"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "into\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "decl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -156,7 +290,16 @@ static inline int atomic_add_negative(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %2,%0\n"
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -179,7 +322,15 @@ static inline int atomic_add_return(int
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
+- asm volatile(LOCK_PREFIX "xaddl %0, %1"
++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movl %0, %1\n"
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+@@ -227,17 +378,28 @@ static inline int atomic_xchg(atomic_t *
+ */
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/atomic_64.h linux-2.6.31.7/arch/x86/include/asm/atomic_64.h
+--- linux-2.6.31.7/arch/x86/include/asm/atomic_64.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/atomic_64.h 2009-12-08 17:39:42.800653209 -0500
+@@ -24,6 +24,17 @@ static inline int atomic_read(const atom
+ }
+
+ /**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
++
++/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
+ }
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
+ */
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "ir" (i), "m" (v->counter));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
+ }
+@@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
+ */
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "ir" (i), "m" (v->counter));
++}
++
++/**
++ * atomic_sub_unchecked - subtract the atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
+ }
+@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -90,7 +166,32 @@ static inline int atomic_sub_and_test(in
+ */
+ static inline void atomic_inc(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "decl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -103,7 +204,19 @@ static inline void atomic_inc(atomic_t *
+ */
+ static inline void atomic_dec(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -120,7 +233,20 @@ static inline int atomic_dec_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decl %0; sete %1"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -138,7 +264,20 @@ static inline int atomic_inc_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incl %0; sete %1"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "decl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -157,7 +296,16 @@ static inline int atomic_add_negative(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -173,7 +321,15 @@ static inline int atomic_add_negative(in
+ static inline int atomic_add_return(int i, atomic_t *v)
+ {
+ int __i = i;
+- asm volatile(LOCK_PREFIX "xaddl %0, %1"
++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movl %0, %1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+@@ -224,7 +380,15 @@ static inline void atomic64_set(atomic64
+ */
+ static inline void atomic64_add(long i, atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addq %1,%0"
++ asm volatile(LOCK_PREFIX "addq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+ }
+@@ -238,7 +402,15 @@ static inline void atomic64_add(long i,
+ */
+ static inline void atomic64_sub(long i, atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subq %1,%0"
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+ }
+@@ -256,7 +428,16 @@ static inline int atomic64_sub_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -270,7 +451,19 @@ static inline int atomic64_sub_and_test(
+ */
+ static inline void atomic64_inc(atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incq %0"
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "decq %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -283,7 +476,19 @@ static inline void atomic64_inc(atomic64
+ */
+ static inline void atomic64_dec(atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decq %0"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incq %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -300,7 +505,20 @@ static inline int atomic64_dec_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decq %0; sete %1"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incq %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -318,7 +536,20 @@ static inline int atomic64_inc_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incq %0; sete %1"
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "decq %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -337,7 +568,16 @@ static inline int atomic64_add_negative(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -353,7 +593,15 @@ static inline int atomic64_add_negative(
+ static inline long atomic64_add_return(long i, atomic64_t *v)
+ {
+ long __i = i;
+- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
++ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movq %0, %1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+@@ -398,17 +646,29 @@ static inline long atomic_xchg(atomic_t
+ */
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+@@ -424,17 +684,29 @@ static inline int atomic_add_unless(atom
+ */
+ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "er" (a));
++
++ old = atomic64_cmpxchg((v), c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ /**
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/boot.h linux-2.6.31.7/arch/x86/include/asm/boot.h
+--- linux-2.6.31.7/arch/x86/include/asm/boot.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/boot.h 2009-12-08 17:39:42.800653209 -0500
+@@ -11,10 +11,15 @@
+ #include <asm/pgtable_types.h>
+
+ /* Physical address where kernel should be loaded. */
+-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ + (CONFIG_PHYSICAL_ALIGN - 1)) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
++#ifndef __ASSEMBLY__
++extern unsigned char __LOAD_PHYSICAL_ADDR[];
++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
++#endif
++
+ /* Minimum kernel alignment, as a power of two */
+ #ifdef CONFIG_X86_64
+ #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/cache.h linux-2.6.31.7/arch/x86/include/asm/cache.h
+--- linux-2.6.31.7/arch/x86/include/asm/cache.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/cache.h 2009-12-08 17:39:42.800653209 -0500
+@@ -6,6 +6,7 @@
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data.read_mostly")))
++#define __read_only __attribute__((__section__(".data.read_only")))
+
+ #ifdef CONFIG_X86_VSMP
+ /* vSMP Internode cacheline shift */
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/checksum_32.h linux-2.6.31.7/arch/x86/include/asm/checksum_32.h
+--- linux-2.6.31.7/arch/x86/include/asm/checksum_32.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/checksum_32.h 2009-12-08 17:39:42.800653209 -0500
+@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
+ /*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
+ int *err_ptr)
+ {
+ might_sleep();
+- return csum_partial_copy_generic((__force void *)src, dst,
++ return csum_partial_copy_generic_from_user((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ }
+
+@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
+ {
+ might_sleep();
+ if (access_ok(VERIFY_WRITE, dst, len))
+- return csum_partial_copy_generic(src, (__force void *)dst,
++ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+
+ if (len)
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/desc.h linux-2.6.31.7/arch/x86/include/asm/desc.h
+--- linux-2.6.31.7/arch/x86/include/asm/desc.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/desc.h 2009-12-08 17:39:42.801656442 -0500
+@@ -4,6 +4,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/ldt.h>
+ #include <asm/mmu.h>
++#include <asm/pgtable.h>
+ #include <linux/smp.h>
+
+ static inline void fill_ldt(struct desc_struct *desc,
+@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
+ desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
+ desc->type = (info->read_exec_only ^ 1) << 1;
+ desc->type |= info->contents << 2;
++ desc->type |= info->seg_not_present ^ 1;
+ desc->s = 1;
+ desc->dpl = 0x3;
+ desc->p = info->seg_not_present ^ 1;
+@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
+ }
+
+ extern struct desc_ptr idt_descr;
+-extern gate_desc idt_table[];
+-
+-struct gdt_page {
+- struct desc_struct gdt[GDT_ENTRIES];
+-} __attribute__((aligned(PAGE_SIZE)));
+-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
++extern gate_desc idt_table[256];
+
++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+- return per_cpu(gdt_page, cpu).gdt;
++ return cpu_gdt_table[cpu];
+ }
+
+ #ifdef CONFIG_X86_64
+@@ -115,19 +113,24 @@ static inline void paravirt_free_ldt(str
+ static inline void native_write_idt_entry(gate_desc *idt, int entry,
+ const gate_desc *gate)
+ {
++ pax_open_kernel();
+ memcpy(&idt[entry], gate, sizeof(*gate));
++ pax_close_kernel();
+ }
+
+ static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
+ const void *desc)
+ {
++ pax_open_kernel();
+ memcpy(&ldt[entry], desc, 8);
++ pax_close_kernel();
+ }
+
+ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
+ const void *desc, int type)
+ {
+ unsigned int size;
++
+ switch (type) {
+ case DESC_TSS:
+ size = sizeof(tss_desc);
+@@ -139,7 +142,10 @@ static inline void native_write_gdt_entr
+ size = sizeof(struct desc_struct);
+ break;
+ }
++
++ pax_open_kernel();
+ memcpy(&gdt[entry], desc, size);
++ pax_close_kernel();
+ }
+
+ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
+@@ -211,7 +217,9 @@ static inline void native_set_ldt(const
+
+ static inline void native_load_tr_desc(void)
+ {
++ pax_open_kernel();
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
++ pax_close_kernel();
+ }
+
+ static inline void native_load_gdt(const struct desc_ptr *dtr)
+@@ -246,8 +254,10 @@ static inline void native_load_tls(struc
+ unsigned int i;
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
++ pax_open_kernel();
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
++ pax_close_kernel();
+ }
+
+ #define _LDT_empty(info) \
+@@ -379,4 +389,16 @@ static inline void set_system_intr_gate_
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+ }
+
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
++{
++ struct desc_struct d;
++
++ if (likely(limit))
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++ pack_descriptor(&d, base, limit, 0xFB, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
++}
++#endif
++
+ #endif /* _ASM_X86_DESC_H */
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/device.h linux-2.6.31.7/arch/x86/include/asm/device.h
+--- linux-2.6.31.7/arch/x86/include/asm/device.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/device.h 2009-12-08 17:39:42.801656442 -0500
+@@ -6,7 +6,7 @@ struct dev_archdata {
+ void *acpi_handle;
+ #endif
+ #ifdef CONFIG_X86_64
+-struct dma_map_ops *dma_ops;
++ const struct dma_map_ops *dma_ops;
+ #endif
+ #ifdef CONFIG_DMAR
+ void *iommu; /* hook for IOMMU specific extension */
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/dma-mapping.h linux-2.6.31.7/arch/x86/include/asm/dma-mapping.h
+--- linux-2.6.31.7/arch/x86/include/asm/dma-mapping.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/dma-mapping.h 2009-12-08 17:39:42.801656442 -0500
+@@ -19,9 +19,9 @@ extern int iommu_merge;
+ extern struct device x86_dma_fallback_dev;
+ extern int panic_on_overflow;
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ #ifdef CONFIG_X86_32
+ return dma_ops;
+@@ -38,7 +38,7 @@ static inline struct dma_map_ops *get_dm
+ /* Make sure we keep the same behaviour */
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
+
+@@ -98,7 +98,7 @@ static inline void *
+ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *memory;
+
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+@@ -127,7 +127,7 @@ dma_alloc_coherent(struct device *dev, s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t bus)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ WARN_ON(irqs_disabled()); /* for portability */
+
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/e820.h linux-2.6.31.7/arch/x86/include/asm/e820.h
+--- linux-2.6.31.7/arch/x86/include/asm/e820.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/e820.h 2009-12-08 17:39:42.802651887 -0500
+@@ -135,7 +135,7 @@ extern char *memory_setup(void);
+ #define ISA_END_ADDRESS 0x100000
+ #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
+
+-#define BIOS_BEGIN 0x000a0000
++#define BIOS_BEGIN 0x000c0000
+ #define BIOS_END 0x00100000
+
+ #ifdef __KERNEL__
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/elf.h linux-2.6.31.7/arch/x86/include/asm/elf.h
+--- linux-2.6.31.7/arch/x86/include/asm/elf.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/elf.h 2009-12-08 17:39:42.802651887 -0500
+@@ -263,7 +263,25 @@ extern int force_personality32;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#ifdef CONFIG_X86_32
++#define PAX_ELF_ET_DYN_BASE 0x10000000UL
++
++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#else
++#define PAX_ELF_ET_DYN_BASE 0x400000UL
++
++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : 32)
++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : 32)
++#endif
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+@@ -317,8 +335,7 @@ do { \
+ #define ARCH_DLINFO \
+ do { \
+ if (vdso_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+- (unsigned long)current->mm->context.vdso); \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
+ } while (0)
+
+ #define AT_SYSINFO 32
+@@ -329,7 +346,7 @@ do { \
+
+ #endif /* !CONFIG_X86_32 */
+
+-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
++#define VDSO_CURRENT_BASE (current->mm->context.vdso)
+
+ #define VDSO_ENTRY \
+ ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+@@ -343,7 +360,4 @@ extern int arch_setup_additional_pages(s
+ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
+ #define compat_arch_setup_additional_pages syscall32_setup_pages
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* _ASM_X86_ELF_H */
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/futex.h linux-2.6.31.7/arch/x86/include/asm/futex.h
+--- linux-2.6.31.7/arch/x86/include/asm/futex.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/futex.h 2009-12-08 17:39:42.802651887 -0500
+@@ -11,6 +11,40 @@
+ #include <asm/processor.h>
+ #include <asm/system.h>
+
++#ifdef CONFIG_X86_32
++#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
++ asm volatile( \
++ "movw\t%w6, %%ds\n" \
++ "1:\t" insn "\n" \
++ "2:\tpushl\t%%ss\n" \
++ "\tpopl\t%%ds\n" \
++ "\t.section .fixup,\"ax\"\n" \
++ "3:\tmov\t%3, %1\n" \
++ "\tjmp\t2b\n" \
++ "\t.previous\n" \
++ _ASM_EXTABLE(1b, 3b) \
++ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
++ : "i" (-EFAULT), "0" (oparg), "1" (0), "r" (__USER_DS))
++
++#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
++ asm volatile("movw\t%w7, %%es\n" \
++ "1:\tmovl\t%%es:%2, %0\n" \
++ "\tmovl\t%0, %3\n" \
++ "\t" insn "\n" \
++ "2:\t" LOCK_PREFIX "cmpxchgl %3, %%es:%2\n"\
++ "\tjnz\t1b\n" \
++ "3:\tpushl\t%%ss\n" \
++ "\tpopl\t%%es\n" \
++ "\t.section .fixup,\"ax\"\n" \
++ "4:\tmov\t%5, %1\n" \
++ "\tjmp\t3b\n" \
++ "\t.previous\n" \
++ _ASM_EXTABLE(1b, 4b) \
++ _ASM_EXTABLE(2b, 4b) \
++ : "=&a" (oldval), "=&r" (ret), \
++ "+m" (*uaddr), "=&r" (tem) \
++ : "r" (oparg), "i" (-EFAULT), "1" (0), "r" (__USER_DS))
++#else
+ #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
+ asm volatile("1:\t" insn "\n" \
+ "2:\t.section .fixup,\"ax\"\n" \
+@@ -36,8 +70,9 @@
+ : "=&a" (oldval), "=&r" (ret), \
+ "+m" (*uaddr), "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
++#endif
+
+-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
++static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+ {
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+@@ -61,11 +96,20 @@ static inline int futex_atomic_op_inuser
+
+ switch (op) {
+ case FUTEX_OP_SET:
++#ifdef CONFIG_X86_32
++ __futex_atomic_op1("xchgl %0, %%ds:%2", ret, oldval, uaddr, oparg);
++#else
+ __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
++#endif
+ break;
+ case FUTEX_OP_ADD:
++#ifdef CONFIG_X86_32
++ __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %%ds:%2", ret, oldval,
++ uaddr, oparg);
++#else
+ __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
+ uaddr, oparg);
++#endif
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
+@@ -109,7 +153,7 @@ static inline int futex_atomic_op_inuser
+ return ret;
+ }
+
+-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
++static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
+ int newval)
+ {
+
+@@ -122,14 +166,27 @@ static inline int futex_atomic_cmpxchg_i
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
++ asm volatile(
++#ifdef CONFIG_X86_32
++ "\tmovw %w5, %%ds\n"
++ "1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
++ "2:\tpushl %%ss\n"
++ "\tpopl %%ds\n"
++ "\t.section .fixup, \"ax\"\n"
++#else
++ "1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
+ "2:\t.section .fixup, \"ax\"\n"
++#endif
+ "3:\tmov %2, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : "=a" (oldval), "+m" (*uaddr)
++#ifdef CONFIG_X86_32
++ : "i" (-EFAULT), "r" (newval), "0" (oldval), "r" (__USER_DS)
++#else
+ : "i" (-EFAULT), "r" (newval), "0" (oldval)
++#endif
+ : "memory"
+ );
+
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/i387.h linux-2.6.31.7/arch/x86/include/asm/i387.h
+--- linux-2.6.31.7/arch/x86/include/asm/i387.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/i387.h 2009-12-08 17:39:42.803662793 -0500
+@@ -194,13 +194,8 @@ static inline int fxrstor_checking(struc
+ }
+
+ /* We need a safe address that is cheap to find and that is already
+- in L1 during context switch. The best choices are unfortunately
+- different for UP and SMP */
+-#ifdef CONFIG_SMP
+-#define safe_address (__per_cpu_offset[0])
+-#else
+-#define safe_address (kstat_cpu(0).cpustat.user)
+-#endif
++ in L1 during context switch. */
++#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
+
+ /*
+ * These must be called with preempt disabled
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/io_64.h linux-2.6.31.7/arch/x86/include/asm/io_64.h
+--- linux-2.6.31.7/arch/x86/include/asm/io_64.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/io_64.h 2009-12-08 17:39:42.803662793 -0500
+@@ -140,6 +140,17 @@ __OUTS(l)
+
+ #include <linux/vmalloc.h>
+
++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
++static inline int valid_phys_addr_range (unsigned long addr, size_t count)
++{
++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
++static inline int valid_mmap_phys_addr_range (unsigned long pfn, size_t count)
++{
++ return (pfn + (count >> PAGE_SHIFT)) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
+ #include <asm-generic/iomap.h>
+
+ void __memcpy_fromio(void *, unsigned long, unsigned);
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/iommu.h linux-2.6.31.7/arch/x86/include/asm/iommu.h
+--- linux-2.6.31.7/arch/x86/include/asm/iommu.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/iommu.h 2009-12-08 17:39:42.804837527 -0500
+@@ -3,7 +3,7 @@
+
+ extern void pci_iommu_shutdown(void);
+ extern void no_iommu_init(void);
+-extern struct dma_map_ops nommu_dma_ops;
++extern const struct dma_map_ops nommu_dma_ops;
+ extern int force_iommu, no_iommu;
+ extern int iommu_detected;
+ extern int iommu_pass_through;
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/irqflags.h linux-2.6.31.7/arch/x86/include/asm/irqflags.h
+--- linux-2.6.31.7/arch/x86/include/asm/irqflags.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/irqflags.h 2009-12-08 17:39:42.805678807 -0500
+@@ -147,6 +147,27 @@ static inline unsigned long __raw_local_
+ #define INTERRUPT_RETURN iret
+ #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+ #define GET_CR0_INTO_EAX movl %cr0, %eax
++
++/* PaX: special register usage in entry_32.S, beware */
++#ifdef CONFIG_PAX_KERNEXEC
++#define PAX_EXIT_KERNEL \
++ bt $16, %esi; \
++ jc 1f; \
++ movl %esi, %cr0; \
++1:
++
++#define PAX_ENTER_KERNEL \
++ movl %cr0, %esi; \
++ movl %esi, %edx; \
++ bts $16, %edx; \
++ jc 1f; \
++ movl %edx, %cr0; \
++1:
++#else
++#define PAX_EXIT_KERNEL
++#define PAX_ENTER_KERNEL
++#endif
++
+ #endif
+
+
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/kvm_host.h linux-2.6.31.7/arch/x86/include/asm/kvm_host.h
+--- linux-2.6.31.7/arch/x86/include/asm/kvm_host.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/kvm_host.h 2009-12-08 17:39:42.805678807 -0500
+@@ -528,7 +528,7 @@ struct kvm_x86_ops {
+ u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+ };
+
+-extern struct kvm_x86_ops *kvm_x86_ops;
++extern const struct kvm_x86_ops *kvm_x86_ops;
+
+ int kvm_mmu_module_init(void);
+ void kvm_mmu_module_exit(void);
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/local.h linux-2.6.31.7/arch/x86/include/asm/local.h
+--- linux-2.6.31.7/arch/x86/include/asm/local.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/local.h 2009-12-08 17:39:42.805678807 -0500
+@@ -18,26 +18,90 @@ typedef struct {
+
+ static inline void local_inc(local_t *l)
+ {
+- asm volatile(_ASM_INC "%0"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_DEC "%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_dec(local_t *l)
+ {
+- asm volatile(_ASM_DEC "%0"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_INC "%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_add(long i, local_t *l)
+ {
+- asm volatile(_ASM_ADD "%1,%0"
++ asm volatile(_ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_SUB "%1,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+
+ static inline void local_sub(long i, local_t *l)
+ {
+- asm volatile(_ASM_SUB "%1,%0"
++ asm volatile(_ASM_SUB "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_ADD "%1,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+@@ -55,7 +119,24 @@ static inline int local_sub_and_test(lon
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_SUB "%2,%0; sete %1"
++ asm volatile(_ASM_SUB "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_ADD "%2,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -73,7 +154,24 @@ static inline int local_dec_and_test(loc
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_DEC "%0; sete %1"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_INC "%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -91,7 +189,24 @@ static inline int local_inc_and_test(loc
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_INC "%0; sete %1"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_DEC "%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -110,7 +225,24 @@ static inline int local_add_negative(lon
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_ADD "%2,%0; sets %1"
++ asm volatile(_ASM_ADD "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_SUB "%2,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sets %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -133,7 +265,23 @@ static inline long local_add_return(long
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
+- asm volatile(_ASM_XADD "%0, %1;"
++ asm volatile(_ASM_XADD "%0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_MOV "%0,%1\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+r" (i), "+m" (l->a.counter)
+ : : "memory");
+ return i + __i;
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/microcode.h linux-2.6.31.7/arch/x86/include/asm/microcode.h
+--- linux-2.6.31.7/arch/x86/include/asm/microcode.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/microcode.h 2009-12-08 17:39:42.806650681 -0500
+@@ -38,18 +38,18 @@ struct ucode_cpu_info {
+ extern struct ucode_cpu_info ucode_cpu_info[];
+
+ #ifdef CONFIG_MICROCODE_INTEL
+-extern struct microcode_ops * __init init_intel_microcode(void);
++extern const struct microcode_ops * __init init_intel_microcode(void);
+ #else
+-static inline struct microcode_ops * __init init_intel_microcode(void)
++static inline const struct microcode_ops * __init init_intel_microcode(void)
+ {
+ return NULL;
+ }
+ #endif /* CONFIG_MICROCODE_INTEL */
+
+ #ifdef CONFIG_MICROCODE_AMD
+-extern struct microcode_ops * __init init_amd_microcode(void);
++extern const struct microcode_ops * __init init_amd_microcode(void);
+ #else
+-static inline struct microcode_ops * __init init_amd_microcode(void)
++static inline const struct microcode_ops * __init init_amd_microcode(void)
+ {
+ return NULL;
+ }
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/mman.h linux-2.6.31.7/arch/x86/include/asm/mman.h
+--- linux-2.6.31.7/arch/x86/include/asm/mman.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/mman.h 2009-12-08 17:39:42.806650681 -0500
+@@ -17,4 +17,14 @@
+ #define MCL_CURRENT 1 /* lock all current mappings */
+ #define MCL_FUTURE 2 /* lock all future mappings */
+
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_32
++#define arch_mmap_check i386_mmap_check
++int i386_mmap_check(unsigned long addr, unsigned long len,
++ unsigned long flags);
++#endif
++#endif
++#endif
++
+ #endif /* _ASM_X86_MMAN_H */
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/mmu_context.h linux-2.6.31.7/arch/x86/include/asm/mmu_context.h
+--- linux-2.6.31.7/arch/x86/include/asm/mmu_context.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/mmu_context.h 2009-12-08 17:39:42.807651339 -0500
+@@ -34,11 +34,17 @@ static inline void switch_mm(struct mm_s
+ struct task_struct *tsk)
+ {
+ unsigned cpu = smp_processor_id();
++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
++ int tlbstate = TLBSTATE_OK;
++#endif
+
+ if (likely(prev != next)) {
+ /* stop flush ipis for the previous mm */
+ cpu_clear(cpu, prev->cpu_vm_mask);
+ #ifdef CONFIG_SMP
++#ifdef CONFIG_X86_32
++ tlbstate = percpu_read(cpu_tlbstate.state);
++#endif
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ percpu_write(cpu_tlbstate.active_mm, next);
+ #endif
+@@ -52,6 +58,26 @@ static inline void switch_mm(struct mm_s
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+ load_LDT_nolock(&next->context);
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ if (!nx_enabled) {
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
++ smp_mb__after_clear_bit();
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++ }
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
++ prev->context.user_cs_limit != next->context.user_cs_limit))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#ifdef CONFIG_SMP
++ else if (unlikely(tlbstate != TLBSTATE_OK))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++#endif
++
+ }
+ #ifdef CONFIG_SMP
+ else {
+@@ -65,6 +91,19 @@ static inline void switch_mm(struct mm_s
+ */
+ load_cr3(next->pgd);
+ load_LDT_nolock(&next->context);
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!nx_enabled)
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
++#endif
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++
+ }
+ }
+ #endif
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/mmu.h linux-2.6.31.7/arch/x86/include/asm/mmu.h
+--- linux-2.6.31.7/arch/x86/include/asm/mmu.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/mmu.h 2009-12-08 17:39:42.806650681 -0500
+@@ -9,10 +9,23 @@
+ * we put the segment information here.
+ */
+ typedef struct {
+- void *ldt;
++ struct desc_struct *ldt;
+ int size;
+ struct mutex lock;
+- void *vdso;
++ unsigned long vdso;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ unsigned long user_cs_base;
++ unsigned long user_cs_limit;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpumask_t cpu_user_cs_mask;
++#endif
++
++#endif
++#endif
++
+ } mm_context_t;
+
+ #ifdef CONFIG_SMP
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/module.h linux-2.6.31.7/arch/x86/include/asm/module.h
+--- linux-2.6.31.7/arch/x86/include/asm/module.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/module.h 2009-12-08 17:39:42.810160540 -0500
+@@ -74,7 +74,12 @@ struct mod_arch_specific {};
+ # else
+ # define MODULE_STACKSIZE ""
+ # endif
+-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
++# ifdef CONFIG_GRKERNSEC
++# define MODULE_GRSEC "GRSECURITY "
++# else
++# define MODULE_GRSEC ""
++# endif
++# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC
+ #endif
+
+ #endif /* _ASM_X86_MODULE_H */
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/page_32_types.h linux-2.6.31.7/arch/x86/include/asm/page_32_types.h
+--- linux-2.6.31.7/arch/x86/include/asm/page_32_types.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/page_32_types.h 2009-12-08 17:39:42.810830384 -0500
+@@ -15,6 +15,10 @@
+ */
+ #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#define CONFIG_ARCH_TRACK_EXEC_LIMIT 1
++#endif
++
+ #ifdef CONFIG_4KSTACKS
+ #define THREAD_ORDER 0
+ #else
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/page_64_types.h linux-2.6.31.7/arch/x86/include/asm/page_64_types.h
+--- linux-2.6.31.7/arch/x86/include/asm/page_64_types.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/page_64_types.h 2009-12-08 17:39:42.810830384 -0500
+@@ -39,6 +39,9 @@
+ #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
+ #define __START_KERNEL_map _AC(0xffffffff80000000, UL)
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
+ #define __PHYSICAL_MASK_SHIFT 46
+ #define __VIRTUAL_MASK_SHIFT 47
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/paravirt.h linux-2.6.31.7/arch/x86/include/asm/paravirt.h
+--- linux-2.6.31.7/arch/x86/include/asm/paravirt.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/paravirt.h 2009-12-08 17:39:42.811672255 -0500
+@@ -350,6 +350,12 @@ struct pv_mmu_ops {
+ an mfn. We can tell which is which from the index. */
+ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long (*pax_open_kernel)(void);
++ unsigned long (*pax_close_kernel)(void);
++#endif
++
+ };
+
+ struct raw_spinlock;
+@@ -1439,6 +1445,21 @@ static inline void __set_fixmap(unsigned
+ pv_mmu_ops.set_fixmap(idx, phys, flags);
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void)
++{
++ return pv_mmu_ops.pax_open_kernel();
++}
++
++static inline unsigned long pax_close_kernel(void)
++{
++ return pv_mmu_ops.pax_close_kernel();
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ void _paravirt_nop(void);
+ u32 _paravirt_ident_32(u32);
+ u64 _paravirt_ident_64(u64);
+@@ -1572,7 +1593,7 @@ static inline unsigned long __raw_local_
+
+ static inline void raw_local_irq_restore(unsigned long f)
+ {
+- PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
++ return PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
+ }
+
+ static inline void raw_local_irq_disable(void)
+@@ -1670,7 +1691,7 @@ static inline unsigned long __raw_local_
+
+ #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
+ #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+-#define PARA_INDIRECT(addr) *%cs:addr
++#define PARA_INDIRECT(addr) *%ss:addr
+ #endif
+
+ #define INTERRUPT_RETURN \
+@@ -1695,6 +1716,31 @@ static inline unsigned long __raw_local_
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
+
+ #ifdef CONFIG_X86_32
++
++#ifdef CONFIG_PAX_KERNEXEC
++#define PAX_EXIT_KERNEL \
++ bt $16, %esi; \
++ jc 1f; \
++ push %eax; push %ecx; \
++ movl %esi, %eax; \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);\
++ pop %ecx; pop %eax; \
++1:
++
++#define PAX_ENTER_KERNEL \
++ push %eax; push %ecx; \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
++ movl %eax, %esi; \
++ bts $16, %eax; \
++ jc 1f; \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);\
++1: \
++ pop %ecx; pop %eax;
++#else
++#define PAX_EXIT_KERNEL
++#define PAX_ENTER_KERNEL
++#endif
++
+ #define GET_CR0_INTO_EAX \
+ push %ecx; push %edx; \
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/pci_x86.h linux-2.6.31.7/arch/x86/include/asm/pci_x86.h
+--- linux-2.6.31.7/arch/x86/include/asm/pci_x86.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/pci_x86.h 2009-12-08 17:39:42.811672255 -0500
+@@ -95,10 +95,10 @@ struct pci_raw_ops {
+ int reg, int len, u32 val);
+ };
+
+-extern struct pci_raw_ops *raw_pci_ops;
+-extern struct pci_raw_ops *raw_pci_ext_ops;
++extern const struct pci_raw_ops *raw_pci_ops;
++extern const struct pci_raw_ops *raw_pci_ext_ops;
+
+-extern struct pci_raw_ops pci_direct_conf1;
++extern const struct pci_raw_ops pci_direct_conf1;
+ extern bool port_cf9_safe;
+
+ /* arch_initcall level */
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/pgalloc.h linux-2.6.31.7/arch/x86/include/asm/pgalloc.h
+--- linux-2.6.31.7/arch/x86/include/asm/pgalloc.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/pgalloc.h 2009-12-08 17:39:42.811672255 -0500
+@@ -58,6 +58,13 @@ static inline void pmd_populate_kernel(s
+ pmd_t *pmd, pte_t *pte)
+ {
+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
++}
++
++static inline void pmd_populate_user(struct mm_struct *mm,
++ pmd_t *pmd, pte_t *pte)
++{
++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+ set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+ }
+
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/pgtable-2level.h linux-2.6.31.7/arch/x86/include/asm/pgtable-2level.h
+--- linux-2.6.31.7/arch/x86/include/asm/pgtable-2level.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/pgtable-2level.h 2009-12-08 17:39:42.812655362 -0500
+@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/pgtable_32.h linux-2.6.31.7/arch/x86/include/asm/pgtable_32.h
+--- linux-2.6.31.7/arch/x86/include/asm/pgtable_32.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/pgtable_32.h 2009-12-08 17:39:42.820049218 -0500
+@@ -26,8 +26,6 @@
+ struct mm_struct;
+ struct vm_area_struct;
+
+-extern pgd_t swapper_pg_dir[1024];
+-
+ static inline void pgtable_cache_init(void) { }
+ static inline void check_pgt_cache(void) { }
+ void paging_init(void);
+@@ -48,6 +46,11 @@ extern void set_pmd_pfn(unsigned long, u
+ # include <asm/pgtable-2level.h>
+ #endif
+
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
++#ifdef CONFIG_X86_PAE
++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
++#endif
++
+ #if defined(CONFIG_HIGHPTE)
+ #define __KM_PTE \
+ (in_nmi() ? KM_NMI_PTE : \
+@@ -72,7 +75,9 @@ extern void set_pmd_pfn(unsigned long, u
+ /* Clear a kernel PTE and flush it from the TLB */
+ #define kpte_clear_flush(ptep, vaddr) \
+ do { \
++ pax_open_kernel(); \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
++ pax_close_kernel(); \
+ __flush_tlb_one((vaddr)); \
+ } while (0)
+
+@@ -84,6 +89,9 @@ do { \
+
+ #endif /* !__ASSEMBLY__ */
+
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
+ /*
+ * kern_addr_valid() is (1) for FLATMEM and (0) for
+ * SPARSEMEM and DISCONTIGMEM
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/pgtable_32_types.h linux-2.6.31.7/arch/x86/include/asm/pgtable_32_types.h
+--- linux-2.6.31.7/arch/x86/include/asm/pgtable_32_types.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/pgtable_32_types.h 2009-12-08 17:39:42.820049218 -0500
+@@ -8,7 +8,7 @@
+ */
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level_types.h>
+-# define PMD_SIZE (1UL << PMD_SHIFT)
++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+ # define PMD_MASK (~(PMD_SIZE - 1))
+ #else
+ # include <asm/pgtable-2level_types.h>
+@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
+ # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifndef __ASSEMBLY__
++extern unsigned char MODULES_EXEC_VADDR[];
++extern unsigned char MODULES_EXEC_END[];
++#endif
++#include <asm/boot.h>
++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
++#else
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++#endif
++
+ #define MODULES_VADDR VMALLOC_START
+ #define MODULES_END VMALLOC_END
+ #define MODULES_LEN (MODULES_VADDR - MODULES_END)
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/pgtable-3level.h linux-2.6.31.7/arch/x86/include/asm/pgtable-3level.h
+--- linux-2.6.31.7/arch/x86/include/asm/pgtable-3level.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/pgtable-3level.h 2009-12-08 17:39:42.812655362 -0500
+@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
++ pax_close_kernel();
+ }
+
+ /*
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/pgtable_64.h linux-2.6.31.7/arch/x86/include/asm/pgtable_64.h
+--- linux-2.6.31.7/arch/x86/include/asm/pgtable_64.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/pgtable_64.h 2009-12-08 17:39:42.821152761 -0500
+@@ -16,9 +16,12 @@
+
+ extern pud_t level3_kernel_pgt[512];
+ extern pud_t level3_ident_pgt[512];
++extern pud_t level3_vmalloc_pgt[512];
++extern pud_t level3_vmemmap_pgt[512];
++extern pud_t level2_vmemmap_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pmd_t level2_fixmap_pgt[512];
+-extern pmd_t level2_ident_pgt[512];
++extern pmd_t level2_ident_pgt[512*4];
+ extern pgd_t init_level4_pgt[];
+
+ #define swapper_pg_dir init_level4_pgt
+@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_pmd_clear(pmd_t *pmd)
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/pgtable.h linux-2.6.31.7/arch/x86/include/asm/pgtable.h
+--- linux-2.6.31.7/arch/x86/include/asm/pgtable.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/pgtable.h 2009-12-08 17:39:42.820049218 -0500
+@@ -84,12 +84,51 @@ static inline void __init paravirt_paget
+
+ #define arch_end_context_switch(prev) do {} while(0)
+
++#define pax_open_kernel() native_pax_open_kernel()
++#define pax_close_kernel() native_pax_close_kernel()
+ #endif /* CONFIG_PARAVIRT */
+
++#define __HAVE_ARCH_PAX_OPEN_KERNEL
++#define __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long native_pax_open_kernel(void)
++{
++ unsigned long cr0;
++
++ preempt_disable();
++ barrier();
++ cr0 = read_cr0();
++ if (likely(cr0 & X86_CR0_WP))
++ write_cr0(cr0 & ~X86_CR0_WP);
++ return cr0;
++}
++
++static inline unsigned long native_pax_close_kernel(void)
++{
++ unsigned long cr0;
++
++ cr0 = read_cr0();
++ if (likely(!(cr0 & X86_CR0_WP)))
++ write_cr0(cr0 | X86_CR0_WP);
++ barrier();
++ preempt_enable_no_resched();
++ return cr0;
++}
++#else
++static inline unsigned long native_pax_open_kernel(void) { return 0; }
++static inline unsigned long native_pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
++static inline int pte_user(pte_t pte)
++{
++ return pte_val(pte) & _PAGE_USER;
++}
++
+ static inline int pte_dirty(pte_t pte)
+ {
+ return pte_flags(pte) & _PAGE_DIRTY;
+@@ -172,9 +211,29 @@ static inline pte_t pte_wrprotect(pte_t
+ return pte_clear_flags(pte, _PAGE_RW);
+ }
+
++static inline pte_t pte_mkread(pte_t pte)
++{
++ return __pte(pte_val(pte) | _PAGE_USER);
++}
++
+ static inline pte_t pte_mkexec(pte_t pte)
+ {
+- return pte_clear_flags(pte, _PAGE_NX);
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_clear_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_set_flags(pte, _PAGE_USER);
++}
++
++static inline pte_t pte_exprotect(pte_t pte)
++{
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_set_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_clear_flags(pte, _PAGE_USER);
+ }
+
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -482,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *p
+
+ static inline int pgd_bad(pgd_t pgd)
+ {
+- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
+ }
+
+ static inline int pgd_none(pgd_t pgd)
+@@ -623,7 +682,9 @@ static inline void ptep_set_wrprotect(st
+ */
+ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
+ {
+- memcpy(dst, src, count * sizeof(pgd_t));
++ pax_open_kernel();
++ memcpy(dst, src, count * sizeof(pgd_t));
++ pax_close_kernel();
+ }
+
+
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/pgtable_types.h linux-2.6.31.7/arch/x86/include/asm/pgtable_types.h
+--- linux-2.6.31.7/arch/x86/include/asm/pgtable_types.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/pgtable_types.h 2009-12-08 17:39:42.821152761 -0500
+@@ -16,12 +16,11 @@
+ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+ #define _PAGE_BIT_PAT 7 /* on 4KB pages */
+ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
+ #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
+ #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
+ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
+ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+ /* If _PAGE_BIT_PRESENT is clear, we use these: */
+@@ -39,7 +38,6 @@
+ #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+ #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+ #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+ #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+ #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+ #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+@@ -55,8 +53,10 @@
+
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+-#else
++#elif defined(CONFIG_KMEMCHECK)
+ #define _PAGE_NX (_AT(pteval_t, 0))
++#else
++#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+ #endif
+
+ #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+@@ -93,6 +93,9 @@
+ #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
+
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++#define PAGE_SHARED_NOEXEC PAGE_SHARED
++
+ #define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+ #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
+@@ -103,8 +106,8 @@
+ #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
+ #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
+-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
+-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
++#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
+ #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+@@ -163,8 +166,8 @@
+ * bits are combined, this will alow user to access the high address mapped
+ * VDSO in the presence of CONFIG_COMPAT_VDSO
+ */
+-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
+ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+ #endif
+
+@@ -277,7 +280,15 @@ static inline pteval_t pte_flags(pte_t p
+ typedef struct page *pgtable_t;
+
+ extern pteval_t __supported_pte_mask;
++#ifdef CONFIG_X86_32
++#ifdef CONFIG_X86_PAE
+ extern int nx_enabled;
++#else
++#define nx_enabled (0)
++#endif
++#else
++#define nx_enabled (1)
++#endif
+
+ #define pgprot_writecombine pgprot_writecombine
+ extern pgprot_t pgprot_writecombine(pgprot_t prot);
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/processor.h linux-2.6.31.7/arch/x86/include/asm/processor.h
+--- linux-2.6.31.7/arch/x86/include/asm/processor.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/processor.h 2009-12-08 17:39:42.821655527 -0500
+@@ -271,7 +271,7 @@ struct tss_struct {
+
+ } ____cacheline_aligned;
+
+-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
++extern struct tss_struct init_tss[NR_CPUS];
+
+ /*
+ * Save the original ist values for checking stack pointers during debugging
+@@ -900,8 +900,17 @@ static inline void spin_lock_prefetch(co
+ */
+ #define TASK_SIZE PAGE_OFFSET
+ #define TASK_SIZE_MAX TASK_SIZE
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
++#else
+ #define STACK_TOP TASK_SIZE
+-#define STACK_TOP_MAX STACK_TOP
++#endif
++#define STACK_TOP_MAX TASK_SIZE
+
+ #define INIT_THREAD { \
+ .sp0 = sizeof(init_stack) + (long)&init_stack, \
+@@ -918,7 +927,7 @@ static inline void spin_lock_prefetch(co
+ */
+ #define INIT_TSS { \
+ .x86_tss = { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .ss0 = __KERNEL_DS, \
+ .ss1 = __KERNEL_CS, \
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+@@ -929,11 +938,7 @@ static inline void spin_lock_prefetch(co
+ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+ #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
+-#define KSTK_TOP(info) \
+-({ \
+- unsigned long *__ptr = (unsigned long *)(info); \
+- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
+-})
++#define KSTK_TOP(info) ((info)->task.thread.sp0)
+
+ /*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+@@ -948,7 +953,7 @@ extern unsigned long thread_saved_pc(str
+ #define task_pt_regs(task) \
+ ({ \
+ struct pt_regs *__regs__; \
+- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
+ __regs__ - 1; \
+ })
+
+@@ -964,7 +969,7 @@ extern unsigned long thread_saved_pc(str
+ * space during mmap's.
+ */
+ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
+- 0xc0000000 : 0xFFFFe000)
++ 0xc0000000 : 0xFFFFf000)
+
+ #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
+ IA32_PAGE_OFFSET : TASK_SIZE_MAX)
+@@ -1001,6 +1006,10 @@ extern void start_thread(struct pt_regs
+ */
+ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
++#endif
++
+ #define KSTK_EIP(task) (task_pt_regs(task)->ip)
+
+ /* Get/set a process' ability to use the timestamp counter instruction */
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/ptrace.h linux-2.6.31.7/arch/x86/include/asm/ptrace.h
+--- linux-2.6.31.7/arch/x86/include/asm/ptrace.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/ptrace.h 2009-12-08 17:39:42.821655527 -0500
+@@ -151,28 +151,29 @@ static inline unsigned long regs_return_
+ }
+
+ /*
+- * user_mode_vm(regs) determines whether a register set came from user mode.
++ * user_mode(regs) determines whether a register set came from user mode.
+ * This is true if V8086 mode was enabled OR if the register set was from
+ * protected mode with RPL-3 CS value. This tricky test checks that with
+ * one comparison. Many places in the kernel can bypass this full check
+- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
++ * be used.
+ */
+-static inline int user_mode(struct pt_regs *regs)
++static inline int user_mode_novm(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+ #else
+- return !!(regs->cs & 3);
++ return !!(regs->cs & SEGMENT_RPL_MASK);
+ #endif
+ }
+
+-static inline int user_mode_vm(struct pt_regs *regs)
++static inline int user_mode(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
+ USER_RPL;
+ #else
+- return user_mode(regs);
++ return user_mode_novm(regs);
+ #endif
+ }
+
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/reboot.h linux-2.6.31.7/arch/x86/include/asm/reboot.h
+--- linux-2.6.31.7/arch/x86/include/asm/reboot.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/reboot.h 2009-12-08 17:39:42.821655527 -0500
+@@ -18,7 +18,7 @@ extern struct machine_ops machine_ops;
+
+ void native_machine_crash_shutdown(struct pt_regs *regs);
+ void native_machine_shutdown(void);
+-void machine_real_restart(const unsigned char *code, int length);
++void machine_real_restart(const unsigned char *code, unsigned int length);
+
+ typedef void (*nmi_shootdown_cb)(int, struct die_args*);
+ void nmi_shootdown_cpus(nmi_shootdown_cb callback);
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/rwsem.h linux-2.6.31.7/arch/x86/include/asm/rwsem.h
+--- linux-2.6.31.7/arch/x86/include/asm/rwsem.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/rwsem.h 2009-12-08 17:39:42.821655527 -0500
+@@ -106,10 +106,26 @@ static inline void __down_read(struct rw
+ {
+ asm volatile("# beginning down_read\n\t"
+ LOCK_PREFIX " incl (%%eax)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "decl (%%eax)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* adds 0x00000001, returns the old value */
+- " jns 1f\n"
++ " jns 2f\n"
+ " call call_rwsem_down_read_failed\n"
+- "1:\n\t"
++ "2:\n\t"
+ "# ending down_read\n\t"
+ : "+m" (sem->count)
+ : "a" (sem)
+@@ -124,13 +140,29 @@ static inline int __down_read_trylock(st
+ __s32 result, tmp;
+ asm volatile("# beginning __down_read_trylock\n\t"
+ " movl %0,%1\n\t"
+- "1:\n\t"
++ "2:\n\t"
+ " movl %1,%2\n\t"
+ " addl %3,%2\n\t"
+- " jle 2f\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "subl %3,%2\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ " jle 3f\n\t"
+ LOCK_PREFIX " cmpxchgl %2,%0\n\t"
+- " jnz 1b\n\t"
+- "2:\n\t"
++ " jnz 2b\n\t"
++ "3:\n\t"
+ "# ending __down_read_trylock\n\t"
+ : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+ : "i" (RWSEM_ACTIVE_READ_BIAS)
+@@ -148,12 +180,28 @@ static inline void __down_write_nested(s
+ tmp = RWSEM_ACTIVE_WRITE_BIAS;
+ asm volatile("# beginning down_write\n\t"
+ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "movl %%edx,(%%eax)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* subtract 0x0000ffff, returns the old value */
+ " testl %%edx,%%edx\n\t"
+ /* was the count 0 before? */
+- " jz 1f\n"
++ " jz 2f\n"
+ " call call_rwsem_down_write_failed\n"
+- "1:\n"
++ "2:\n"
+ "# ending down_write"
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (tmp)
+@@ -186,10 +234,26 @@ static inline void __up_read(struct rw_s
+ __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
+ asm volatile("# beginning __up_read\n\t"
+ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "movl %%edx,(%%eax)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* subtracts 1, returns the old value */
+- " jns 1f\n\t"
++ " jns 2f\n\t"
+ " call call_rwsem_wake\n"
+- "1:\n"
++ "2:\n"
+ "# ending __up_read\n"
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (tmp)
+@@ -204,11 +268,27 @@ static inline void __up_write(struct rw_
+ asm volatile("# beginning __up_write\n\t"
+ " movl %2,%%edx\n\t"
+ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "movl %%edx,(%%eax)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* tries to transition
+ 0xffff0001 -> 0x00000000 */
+- " jz 1f\n"
++ " jz 2f\n"
+ " call call_rwsem_wake\n"
+- "1:\n\t"
++ "2:\n\t"
+ "# ending __up_write\n"
+ : "+m" (sem->count)
+ : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
+@@ -222,10 +302,26 @@ static inline void __downgrade_write(str
+ {
+ asm volatile("# beginning __downgrade_write\n\t"
+ LOCK_PREFIX " addl %2,(%%eax)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "subl %2,(%%eax)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+- " jns 1f\n\t"
++ " jns 2f\n\t"
+ " call call_rwsem_downgrade_wake\n"
+- "1:\n\t"
++ "2:\n\t"
+ "# ending __downgrade_write\n"
+ : "+m" (sem->count)
+ : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
+@@ -237,7 +333,23 @@ static inline void __downgrade_write(str
+ */
+ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (sem->count)
+ : "ir" (delta));
+ }
+@@ -249,7 +361,23 @@ static inline int rwsem_atomic_update(in
+ {
+ int tmp = delta;
+
+- asm volatile(LOCK_PREFIX "xadd %0,%1"
++ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "movl %0,%1\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+r" (tmp), "+m" (sem->count)
+ : : "memory");
+
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/segment.h linux-2.6.31.7/arch/x86/include/asm/segment.h
+--- linux-2.6.31.7/arch/x86/include/asm/segment.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/segment.h 2009-12-08 17:39:42.821655527 -0500
+@@ -88,7 +88,7 @@
+ #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
+ #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
+
+-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
++#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
+ #ifdef CONFIG_SMP
+ #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
+ #else
+@@ -102,6 +102,12 @@
+ #define __KERNEL_STACK_CANARY 0
+ #endif
+
++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
++
++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
++
+ #define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+ /*
+@@ -139,7 +145,7 @@
+ */
+
+ /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
+
+
+ #else
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/spinlock.h linux-2.6.31.7/arch/x86/include/asm/spinlock.h
+--- linux-2.6.31.7/arch/x86/include/asm/spinlock.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/spinlock.h 2009-12-08 17:39:42.823122099 -0500
+@@ -249,18 +249,50 @@ static inline int __raw_write_can_lock(r
+ static inline void __raw_read_lock(raw_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+- "jns 1f\n"
+- "call __read_lock_failed\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
+ "1:\n"
++ LOCK_PREFIX " addl $1,(%0)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "jns 2f\n"
++ "call __read_lock_failed\n\t"
++ "2:\n"
+ ::LOCK_PTR_REG (rw) : "memory");
+ }
+
+ static inline void __raw_write_lock(raw_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
+- "jz 1f\n"
+- "call __write_lock_failed\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
+ "1:\n"
++ LOCK_PREFIX " addl %1,(%0)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "jz 2f\n"
++ "call __write_lock_failed\n\t"
++ "2:\n"
+ ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
+ }
+
+@@ -286,12 +318,45 @@ static inline int __raw_write_trylock(ra
+
+ static inline void __raw_read_unlock(raw_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "decl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ :"+m" (rw->lock) : : "memory");
+ }
+
+ static inline void __raw_write_unlock(raw_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX "addl %1, %0"
++ asm volatile(LOCK_PREFIX "addl %1, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
+ }
+
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/system.h linux-2.6.31.7/arch/x86/include/asm/system.h
+--- linux-2.6.31.7/arch/x86/include/asm/system.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/system.h 2009-12-08 17:39:42.823652117 -0500
+@@ -227,7 +227,7 @@ static inline unsigned long get_limit(un
+ {
+ unsigned long __limit;
+ asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+- return __limit + 1;
++ return __limit;
+ }
+
+ static inline void native_clts(void)
+@@ -367,7 +367,7 @@ void enable_hlt(void);
+
+ void cpu_idle_wait(void);
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+ void default_idle(void);
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/uaccess_32.h linux-2.6.31.7/arch/x86/include/asm/uaccess_32.h
+--- linux-2.6.31.7/arch/x86/include/asm/uaccess_32.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/uaccess_32.h 2009-12-08 17:39:42.823652117 -0500
+@@ -44,6 +44,9 @@ unsigned long __must_check __copy_from_u
+ static __always_inline unsigned long __must_check
+ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -62,6 +65,8 @@ __copy_to_user_inatomic(void __user *to,
+ return ret;
+ }
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_to_user_ll(to, from, n);
+ }
+
+@@ -89,6 +94,9 @@ __copy_to_user(void __user *to, const vo
+ static __always_inline unsigned long
+ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ /* Avoid zeroing the tail if the copy fails..
+ * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+ * but as the zeroing behaviour is only significant when n is not
+@@ -138,6 +146,10 @@ static __always_inline unsigned long
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -153,6 +165,8 @@ __copy_from_user(void *to, const void __
+ return ret;
+ }
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_from_user_ll(to, from, n);
+ }
+
+@@ -160,6 +174,10 @@ static __always_inline unsigned long __c
+ const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -182,14 +200,62 @@ static __always_inline unsigned long
+ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
+ {
+- return __copy_from_user_ll_nocache_nozero(to, from, n);
++ if ((long)n < 0)
++ return n;
++
++ return __copy_from_user_ll_nocache_nozero(to, from, n);
++}
++
++/**
++ * copy_to_user: - Copy a block of data into user space.
++ * @to: Destination address, in user space.
++ * @from: Source address, in kernel space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from kernel space to user space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ */
++static __always_inline unsigned long __must_check
++copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if (access_ok(VERIFY_WRITE, to, n))
++ n = __copy_to_user(to, from, n);
++ return n;
++}
++
++/**
++ * copy_from_user: - Copy a block of data from user space.
++ * @to: Destination address, in kernel space.
++ * @from: Source address, in user space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from user space to kernel space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ *
++ * If some data could not be copied, this function will pad the copied
++ * data to the requested size using zero bytes.
++ */
++static __always_inline unsigned long __must_check
++copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if ((long)n > 0) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ memset(to, 0, n);
++ }
++ return n;
+ }
+
+-unsigned long __must_check copy_to_user(void __user *to,
+- const void *from, unsigned long n);
+-unsigned long __must_check copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n);
+ long __must_check strncpy_from_user(char *dst, const char __user *src,
+ long count);
+ long __must_check __strncpy_from_user(char *dst,
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/uaccess_64.h linux-2.6.31.7/arch/x86/include/asm/uaccess_64.h
+--- linux-2.6.31.7/arch/x86/include/asm/uaccess_64.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/uaccess_64.h 2009-12-08 17:39:42.824654145 -0500
+@@ -10,6 +10,8 @@
+ #include <linux/lockdep.h>
+ #include <asm/page.h>
+
++#define set_fs(x) (current_thread_info()->addr_limit = (x))
++
+ /*
+ * Copy To/From Userspace
+ */
+@@ -19,20 +21,22 @@ __must_check unsigned long
+ copy_user_generic(void *to, const void *from, unsigned len);
+
+ __must_check unsigned long
+-copy_to_user(void __user *to, const void *from, unsigned len);
+-__must_check unsigned long
+-copy_from_user(void *to, const void __user *from, unsigned len);
+-__must_check unsigned long
+ copy_in_user(void __user *to, const void __user *from, unsigned len);
+
+ static __always_inline __must_check
+-int __copy_from_user(void *dst, const void __user *src, unsigned size)
++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
+ {
+- int ret = 0;
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
++
++ if ((int)size < 0)
++ return size;
++
++ if (!__builtin_constant_p(size)) {
++ check_object_size(dst, size, false);
+ return copy_user_generic(dst, (__force void *)src, size);
++ }
+ switch (size) {
+ case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+@@ -70,13 +74,19 @@ int __copy_from_user(void *dst, const vo
+ }
+
+ static __always_inline __must_check
+-int __copy_to_user(void __user *dst, const void *src, unsigned size)
++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
+ {
+- int ret = 0;
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
++
++ if ((int)size < 0)
++ return size;
++
++ if (!__builtin_constant_p(size)) {
++ check_object_size(src, size, true);
+ return copy_user_generic((__force void *)dst, src, size);
++ }
+ switch (size) {
+ case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+ ret, "b", "b", "iq", 1);
+@@ -114,11 +124,39 @@ int __copy_to_user(void __user *dst, con
+ }
+
+ static __always_inline __must_check
+-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
+ {
+- int ret = 0;
++ if (access_ok(VERIFY_WRITE, to, len))
++ len = __copy_to_user(to, from, len);
++ return len;
++}
++
++static __always_inline __must_check
++unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
++{
++ if ((int)len < 0)
++ return len;
++
++ if (access_ok(VERIFY_READ, from, len))
++ len = __copy_from_user(to, from, len);
++ else if ((int)len > 0) {
++ if (!__builtin_constant_p(len))
++ check_object_size(to, len, false);
++ memset(to, 0, len);
++ }
++ return len;
++}
++
++static __always_inline __must_check
++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++{
++ unsigned ret = 0;
+
+ might_fault();
++
++ if ((int)size < 0)
++ return size;
++
+ if (!__builtin_constant_p(size))
+ return copy_user_generic((__force void *)dst,
+ (__force void *)src, size);
+@@ -179,30 +217,38 @@ __must_check unsigned long __clear_user(
+ __must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
+ unsigned size);
+
+-static __must_check __always_inline int
++static __must_check __always_inline unsigned long
+ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+ {
++ if ((int)size < 0)
++ return size;
++
+ return copy_user_generic((__force void *)dst, src, size);
+ }
+
+-extern long __copy_user_nocache(void *dst, const void __user *src,
++extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
+ unsigned size, int zerorest);
+
+-static inline int
+-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
+ {
+ might_sleep();
++
++ if ((int)size < 0)
++ return size;
++
+ return __copy_user_nocache(dst, src, size, 1);
+ }
+
+-static inline int
+-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+ unsigned size)
+ {
++ if ((int)size < 0)
++ return size;
++
+ return __copy_user_nocache(dst, src, size, 0);
+ }
+
+-unsigned long
++extern unsigned long
+ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+
+ #endif /* _ASM_X86_UACCESS_64_H */
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/uaccess.h linux-2.6.31.7/arch/x86/include/asm/uaccess.h
+--- linux-2.6.31.7/arch/x86/include/asm/uaccess.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/uaccess.h 2009-12-08 17:39:42.823652117 -0500
+@@ -8,8 +8,11 @@
+ #include <linux/thread_info.h>
+ #include <linux/prefetch.h>
+ #include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
++#include <asm/segment.h>
+
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+@@ -29,7 +32,12 @@
+
+ #define get_ds() (KERNEL_DS)
+ #define get_fs() (current_thread_info()->addr_limit)
++#ifdef CONFIG_X86_32
++void __set_fs(mm_segment_t x, int cpu);
++void set_fs(mm_segment_t x);
++#else
+ #define set_fs(x) (current_thread_info()->addr_limit = (x))
++#endif
+
+ #define segment_eq(a, b) ((a).seg == (b).seg)
+
+@@ -77,7 +85,29 @@
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define access_ok(type, addr, size) \
++({ \
++ long __size = size; \
++ unsigned long __addr = (unsigned long)addr; \
++ unsigned long __addr_ao = __addr & PAGE_MASK; \
++ unsigned long __end_ao = __addr + __size - 1; \
++ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
++ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
++ for (; __addr_ao <= __end_ao; __addr_ao += PAGE_SIZE) { \
++ char __c_ao; \
++ if (__size > PAGE_SIZE) \
++ cond_resched(); \
++ if (__get_user(__c_ao, (char __user *)__addr_ao))\
++ break; \
++ if (type != VERIFY_WRITE) \
++ continue; \
++ if (__put_user(__c_ao, (char __user *)__addr_ao))\
++ break; \
++ } \
++ } \
++ __ret_ao; \
++})
+
+ /*
+ * The exception table consists of pairs of addresses: the first is the
+@@ -183,13 +213,21 @@ extern int __get_user_bad(void);
+ asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+
+-
++#ifdef CONFIG_X86_32
++#define _ASM_LOAD_USER_DS(ds) "movw %w" #ds ",%%ds\n"
++#define _ASM_LOAD_KERNEL_DS "pushl %%ss; popl %%ds\n"
++#else
++#define _ASM_LOAD_USER_DS(ds)
++#define _ASM_LOAD_KERNEL_DS
++#endif
+
+ #ifdef CONFIG_X86_32
+ #define __put_user_asm_u64(x, addr, err, errret) \
+- asm volatile("1: movl %%eax,0(%2)\n" \
+- "2: movl %%edx,4(%2)\n" \
++ asm volatile(_ASM_LOAD_USER_DS(5) \
++ "1: movl %%eax,%%ds:0(%2)\n" \
++ "2: movl %%edx,%%ds:4(%2)\n" \
+ "3:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+ " jmp 3b\n" \
+@@ -197,15 +235,18 @@ extern int __get_user_bad(void);
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=r" (err) \
+- : "A" (x), "r" (addr), "i" (errret), "0" (err))
++ : "A" (x), "r" (addr), "i" (errret), "0" (err), \
++ "r"(__USER_DS))
+
+ #define __put_user_asm_ex_u64(x, addr) \
+- asm volatile("1: movl %%eax,0(%1)\n" \
+- "2: movl %%edx,4(%1)\n" \
++ asm volatile(_ASM_LOAD_USER_DS(2) \
++ "1: movl %%eax,%%ds:0(%1)\n" \
++ "2: movl %%edx,%%ds:4(%1)\n" \
+ "3:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ _ASM_EXTABLE(2b, 3b - 2b) \
+- : : "A" (x), "r" (addr))
++ : : "A" (x), "r" (addr), "r"(__USER_DS))
+
+ #define __put_user_x8(x, ptr, __ret_pu) \
+ asm volatile("call __put_user_8" : "=a" (__ret_pu) \
+@@ -374,16 +415,18 @@ do { \
+ } while (0)
+
+ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
++ asm volatile(_ASM_LOAD_USER_DS(5) \
++ "1: mov"itype" %%ds:%2,%"rtype"1\n" \
+ "2:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " xor"itype" %"rtype"1,%"rtype"1\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (err), ltype(x) \
+- : "m" (__m(addr)), "i" (errret), "0" (err))
++ : "=r" (err), ltype (x) \
++ : "m" (__m(addr)), "i" (errret), "0" (err), "r"(__USER_DS))
+
+ #define __get_user_size_ex(x, ptr, size) \
+ do { \
+@@ -407,10 +450,12 @@ do { \
+ } while (0)
+
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
++ asm volatile(_ASM_LOAD_USER_DS(2) \
++ "1: mov"itype" %%ds:%1,%"rtype"0\n" \
+ "2:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+- : ltype(x) : "m" (__m(addr)))
++ : ltype(x) : "m" (__m(addr)), "r"(__USER_DS))
+
+ #define __put_user_nocheck(x, ptr, size) \
+ ({ \
+@@ -424,7 +469,7 @@ do { \
+ int __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+- (x) = (__force __typeof__(*(ptr)))__gu_val; \
++ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+ })
+
+@@ -438,21 +483,26 @@ struct __large_struct { unsigned long bu
+ * aliasing issues.
+ */
+ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
++ asm volatile(_ASM_LOAD_USER_DS(5) \
++ "1: mov"itype" %"rtype"1,%%ds:%2\n" \
+ "2:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r"(err) \
+- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err),\
++ "r"(__USER_DS))
+
+ #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
++ asm volatile(_ASM_LOAD_USER_DS(2) \
++ "1: mov"itype" %"rtype"0,%%ds:%1\n" \
+ "2:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+- : : ltype(x), "m" (__m(addr)))
++ : : ltype(x), "m" (__m(addr)), "r"(__USER_DS))
+
+ /*
+ * uaccess_try and catch
+@@ -530,7 +580,7 @@ struct __large_struct { unsigned long bu
+ #define get_user_ex(x, ptr) do { \
+ unsigned long __gue_val; \
+ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
+- (x) = (__force __typeof__(*(ptr)))__gue_val; \
++ (x) = (__typeof__(*(ptr)))__gue_val; \
+ } while (0)
+
+ #ifdef CONFIG_X86_WP_WORKS_OK
+@@ -567,6 +617,7 @@ extern struct movsl_mask {
+
+ #define ARCH_HAS_NOCACHE_UACCESS 1
+
++#define ARCH_HAS_SORT_EXTABLE
+ #ifdef CONFIG_X86_32
+ # include "uaccess_32.h"
+ #else
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/vgtod.h linux-2.6.31.7/arch/x86/include/asm/vgtod.h
+--- linux-2.6.31.7/arch/x86/include/asm/vgtod.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/vgtod.h 2009-12-08 17:39:42.824654145 -0500
+@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
+ int sysctl_enabled;
+ struct timezone sys_tz;
+ struct { /* extract of a clocksource struct */
++ char name[8];
+ cycle_t (*vread)(void);
+ cycle_t cycle_last;
+ cycle_t mask;
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/vmi.h linux-2.6.31.7/arch/x86/include/asm/vmi.h
+--- linux-2.6.31.7/arch/x86/include/asm/vmi.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/vmi.h 2009-12-08 17:39:42.824654145 -0500
+@@ -191,6 +191,7 @@ struct vrom_header {
+ u8 reserved[96]; /* Reserved for headers */
+ char vmi_init[8]; /* VMI_Init jump point */
+ char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
++ char rom_data[8048]; /* rest of the option ROM */
+ } __attribute__((packed));
+
+ struct pnp_header {
+diff -urNp linux-2.6.31.7/arch/x86/include/asm/vsyscall.h linux-2.6.31.7/arch/x86/include/asm/vsyscall.h
+--- linux-2.6.31.7/arch/x86/include/asm/vsyscall.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/include/asm/vsyscall.h 2009-12-08 17:39:42.825667353 -0500
+@@ -15,9 +15,10 @@ enum vsyscall_num {
+
+ #ifdef __KERNEL__
+ #include <linux/seqlock.h>
++#include <linux/getcpu.h>
++#include <linux/time.h>
+
+ #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
+-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
+
+ /* Definitions for CONFIG_GENERIC_TIME definitions */
+ #define __section_vsyscall_gtod_data __attribute__ \
+@@ -31,7 +32,6 @@ enum vsyscall_num {
+ #define VGETCPU_LSL 2
+
+ extern int __vgetcpu_mode;
+-extern volatile unsigned long __jiffies;
+
+ /* kernel space (writeable) */
+ extern int vgetcpu_mode;
+@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
+
+ extern void map_vsyscall(void);
+
++extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
++extern time_t vtime(time_t *t);
++extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
+ #endif /* __KERNEL__ */
+
+ #endif /* _ASM_X86_VSYSCALL_H */
+diff -urNp linux-2.6.31.7/arch/x86/Kconfig linux-2.6.31.7/arch/x86/Kconfig
+--- linux-2.6.31.7/arch/x86/Kconfig 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/Kconfig 2009-12-08 17:39:42.779150095 -0500
+@@ -1098,7 +1098,7 @@ config PAGE_OFFSET
+ hex
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0x80000000 if VMSPLIT_2G
+- default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x70000000 if VMSPLIT_2G_OPT
+ default 0x40000000 if VMSPLIT_1G
+ default 0xC0000000
+ depends on X86_32
+@@ -1416,7 +1416,7 @@ config X86_PAT
+
+ config EFI
+ bool "EFI runtime service support"
+- depends on ACPI
++ depends on ACPI && !PAX_KERNEXEC
+ ---help---
+ This enables the kernel to use EFI runtime services that are
+ available (such as the EFI variable services).
+@@ -1507,6 +1507,7 @@ config KEXEC_JUMP
+ config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+ default "0x1000000"
++ range 0x400000 0x40000000
+ ---help---
+ This gives the physical address where the kernel is loaded.
+
+@@ -1571,6 +1572,7 @@ config PHYSICAL_ALIGN
+ hex
+ prompt "Alignment value to which kernel should be aligned" if X86_32
+ default "0x1000000"
++ range 0x400000 0x1000000 if PAX_KERNEXEC
+ range 0x2000 0x1000000
+ ---help---
+ This value puts the alignment restrictions on physical address
+@@ -1602,9 +1604,10 @@ config HOTPLUG_CPU
+ Say N if you want to disable CPU hotplug.
+
+ config COMPAT_VDSO
+- def_bool y
++ def_bool n
+ prompt "Compat VDSO support"
+ depends on X86_32 || IA32_EMULATION
++ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
+ ---help---
+ Map the 32-bit VDSO to the predictable old-style address too.
+diff -urNp linux-2.6.31.7/arch/x86/Kconfig.cpu linux-2.6.31.7/arch/x86/Kconfig.cpu
+--- linux-2.6.31.7/arch/x86/Kconfig.cpu 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/Kconfig.cpu 2009-12-08 17:39:42.793644779 -0500
+@@ -331,7 +331,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
+
+ config X86_WP_WORKS_OK
+ def_bool y
+@@ -351,7 +351,7 @@ config X86_POPAD_OK
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+@@ -397,7 +397,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64)
++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+diff -urNp linux-2.6.31.7/arch/x86/Kconfig.debug linux-2.6.31.7/arch/x86/Kconfig.debug
+--- linux-2.6.31.7/arch/x86/Kconfig.debug 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/Kconfig.debug 2009-12-08 17:39:42.793644779 -0500
+@@ -99,7 +99,7 @@ config X86_PTDUMP
+ config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ default y
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && BROKEN
+ ---help---
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+diff -urNp linux-2.6.31.7/arch/x86/kernel/acpi/boot.c linux-2.6.31.7/arch/x86/kernel/acpi/boot.c
+--- linux-2.6.31.7/arch/x86/kernel/acpi/boot.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/acpi/boot.c 2009-12-08 17:39:42.825667353 -0500
+@@ -1609,7 +1609,7 @@ static struct dmi_system_id __initdata a
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
+ },
+ },
+- {}
++ { NULL, NULL, {{0, {0}}}, NULL}
+ };
+
+ /*
+diff -urNp linux-2.6.31.7/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.31.7/arch/x86/kernel/acpi/realmode/wakeup.S
+--- linux-2.6.31.7/arch/x86/kernel/acpi/realmode/wakeup.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/acpi/realmode/wakeup.S 2009-12-08 17:39:42.826654440 -0500
+@@ -104,7 +104,7 @@ _start:
+ movl %eax, %ecx
+ orl %edx, %ecx
+ jz 1f
+- movl $0xc0000080, %ecx
++ movl $MSR_EFER, %ecx
+ wrmsr
+ 1:
+
+diff -urNp linux-2.6.31.7/arch/x86/kernel/acpi/sleep.c linux-2.6.31.7/arch/x86/kernel/acpi/sleep.c
+--- linux-2.6.31.7/arch/x86/kernel/acpi/sleep.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/acpi/sleep.c 2009-12-08 17:39:42.826654440 -0500
+@@ -11,11 +11,12 @@
+ #include <linux/cpumask.h>
+ #include <asm/segment.h>
+ #include <asm/desc.h>
++#include <asm/e820.h>
+
+ #include "realmode/wakeup.h"
+ #include "sleep.h"
+
+-unsigned long acpi_wakeup_address;
++unsigned long acpi_wakeup_address = 0x2000;
+ unsigned long acpi_realmode_flags;
+
+ /* address in low memory of the wakeup routine. */
+@@ -99,8 +100,12 @@ int acpi_save_state_mem(void)
+ header->trampoline_segment = setup_trampoline() >> 4;
+ #ifdef CONFIG_SMP
+ stack_start.sp = temp_stack + sizeof(temp_stack);
++
++ pax_open_kernel();
+ early_gdt_descr.address =
+ (unsigned long)get_cpu_gdt_table(smp_processor_id());
++ pax_close_kernel();
++
+ initial_gs = per_cpu_offset(smp_processor_id());
+ #endif
+ initial_code = (unsigned long)wakeup_long64;
+@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
+ return;
+ }
+
+- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
+-
+- if (!acpi_realmode) {
+- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
+- return;
+- }
+-
+- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
++ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
++ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
+ }
+
+
+diff -urNp linux-2.6.31.7/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.31.7/arch/x86/kernel/acpi/wakeup_32.S
+--- linux-2.6.31.7/arch/x86/kernel/acpi/wakeup_32.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/acpi/wakeup_32.S 2009-12-08 17:39:42.826654440 -0500
+@@ -30,13 +30,11 @@ wakeup_pmode_return:
+ # and restore the stack ... but you need gdt for this to work
+ movl saved_context_esp, %esp
+
+- movl %cs:saved_magic, %eax
+- cmpl $0x12345678, %eax
++ cmpl $0x12345678, saved_magic
+ jne bogus_magic
+
+ # jump to place where we left off
+- movl saved_eip, %eax
+- jmp *%eax
++ jmp *(saved_eip)
+
+ bogus_magic:
+ jmp bogus_magic
+diff -urNp linux-2.6.31.7/arch/x86/kernel/alternative.c linux-2.6.31.7/arch/x86/kernel/alternative.c
+--- linux-2.6.31.7/arch/x86/kernel/alternative.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/alternative.c 2009-12-08 17:39:42.826654440 -0500
+@@ -400,7 +400,7 @@ void apply_paravirt(struct paravirt_patc
+
+ BUG_ON(p->len > MAX_PATCH_LEN);
+ /* prep the buffer with the original instructions */
+- memcpy(insnbuf, p->instr, p->len);
++ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
+ used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+ (unsigned long)p->instr, p->len);
+
+@@ -485,11 +485,16 @@ void __init alternative_instructions(voi
+ * instructions. And on the local CPU you need to be protected again NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+-void *text_poke_early(void *addr, const void *opcode, size_t len)
++void *__kprobes text_poke_early(void *addr, const void *opcode, size_t len)
+ {
+ unsigned long flags;
++
+ local_irq_save(flags);
+- memcpy(addr, opcode, len);
++
++ pax_open_kernel();
++ memcpy(ktla_ktva(addr), opcode, len);
++ pax_close_kernel();
++
+ local_irq_restore(flags);
+ sync_core();
+ /* Could also do a CLFLUSH here to speed up CPU recovery; but
+@@ -512,35 +517,21 @@ void *text_poke_early(void *addr, const
+ */
+ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
+ {
+- unsigned long flags;
+- char *vaddr;
++ unsigned char *vaddr = ktla_ktva(addr);
+ struct page *pages[2];
+- int i;
++ size_t i;
+
+ if (!core_kernel_text((unsigned long)addr)) {
+- pages[0] = vmalloc_to_page(addr);
+- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
++ pages[0] = vmalloc_to_page(vaddr);
++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
+ } else {
+- pages[0] = virt_to_page(addr);
++ pages[0] = virt_to_page(vaddr);
+ WARN_ON(!PageReserved(pages[0]));
+- pages[1] = virt_to_page(addr + PAGE_SIZE);
++ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
+ }
+ BUG_ON(!pages[0]);
+- local_irq_save(flags);
+- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+- if (pages[1])
+- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
+- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+- clear_fixmap(FIX_TEXT_POKE0);
+- if (pages[1])
+- clear_fixmap(FIX_TEXT_POKE1);
+- local_flush_tlb();
+- sync_core();
+- /* Could also do a CLFLUSH here to speed up CPU recovery; but
+- that causes hangs on some VIA CPUs. */
++ text_poke_early(addr, opcode, len);
+ for (i = 0; i < len; i++)
+- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
+- local_irq_restore(flags);
++ BUG_ON(((char *)vaddr)[i] != ((char *)opcode)[i]);
+ return addr;
+ }
+diff -urNp linux-2.6.31.7/arch/x86/kernel/apic/io_apic.c linux-2.6.31.7/arch/x86/kernel/apic/io_apic.c
+--- linux-2.6.31.7/arch/x86/kernel/apic/io_apic.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/apic/io_apic.c 2009-12-08 17:39:42.847653452 -0500
+@@ -719,7 +719,7 @@ struct IO_APIC_route_entry **alloc_ioapi
+ ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
+ GFP_ATOMIC);
+ if (!ioapic_entries)
+- return 0;
++ return NULL;
+
+ for (apic = 0; apic < nr_ioapics; apic++) {
+ ioapic_entries[apic] =
+@@ -736,7 +736,7 @@ nomem:
+ kfree(ioapic_entries[apic]);
+ kfree(ioapic_entries);
+
+- return 0;
++ return NULL;
+ }
+
+ /*
+@@ -1153,7 +1153,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
+ }
+ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+
+-void lock_vector_lock(void)
++void lock_vector_lock(void) __acquires(vector_lock)
+ {
+ /* Used to the online set of cpus does not change
+ * during assign_irq_vector.
+@@ -1161,7 +1161,7 @@ void lock_vector_lock(void)
+ spin_lock(&vector_lock);
+ }
+
+-void unlock_vector_lock(void)
++void unlock_vector_lock(void) __releases(vector_lock)
+ {
+ spin_unlock(&vector_lock);
+ }
+diff -urNp linux-2.6.31.7/arch/x86/kernel/apm_32.c linux-2.6.31.7/arch/x86/kernel/apm_32.c
+--- linux-2.6.31.7/arch/x86/kernel/apm_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/apm_32.c 2009-12-08 17:39:42.848657866 -0500
+@@ -403,7 +403,7 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitq
+ static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
+ static struct apm_user *user_list;
+ static DEFINE_SPINLOCK(user_list_lock);
+-static const struct desc_struct bad_bios_desc = { { { 0, 0x00409200 } } };
++static const struct desc_struct bad_bios_desc = { { { 0, 0x00409300 } } };
+
+ static const char driver_version[] = "1.16ac"; /* no spaces */
+
+@@ -580,7 +580,10 @@ static long __apm_bios_call(void *_call)
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -589,7 +592,11 @@ static long __apm_bios_call(void *_call)
+ &call->esi);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+
+ return call->eax & 0xff;
+@@ -656,7 +663,10 @@ static long __apm_bios_call_simple(void
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -664,7 +674,11 @@ static long __apm_bios_call_simple(void
+ &call->eax);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+ return error;
+ }
+@@ -967,7 +981,7 @@ recalc:
+
+ static void apm_power_off(void)
+ {
+- unsigned char po_bios_call[] = {
++ const unsigned char po_bios_call[] = {
+ 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
+ 0x8e, 0xd0, /* movw ax,ss */
+ 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
+@@ -1925,7 +1939,10 @@ static const struct file_operations apm_
+ static struct miscdevice apm_device = {
+ APM_MINOR_DEV,
+ "apm_bios",
+- &apm_bios_fops
++ &apm_bios_fops,
++ {NULL, NULL},
++ NULL,
++ NULL
+ };
+
+
+@@ -2246,7 +2263,7 @@ static struct dmi_system_id __initdata a
+ { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
+ },
+
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}
+ };
+
+ /*
+@@ -2337,8 +2354,11 @@ static int __init apm_init(void)
+ * This is for buggy BIOS's that refer to (real mode) segment 0x40
+ * even though they are called in protected mode.
+ */
++
++ pax_open_kernel();
+ set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
+ _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
++ pax_close_kernel();
+
+ /*
+ * Set up the long jump entry point to the APM BIOS, which is called
+@@ -2358,12 +2378,15 @@ static int __init apm_init(void)
+ * code to that CPU.
+ */
+ gdt = get_cpu_gdt_table(0);
++
++ pax_open_kernel();
+ set_base(gdt[APM_CS >> 3],
+ __va((unsigned long)apm_info.bios.cseg << 4));
+ set_base(gdt[APM_CS_16 >> 3],
+ __va((unsigned long)apm_info.bios.cseg_16 << 4));
+ set_base(gdt[APM_DS >> 3],
+ __va((unsigned long)apm_info.bios.dseg << 4));
++ pax_close_kernel();
+
+ proc_create("apm", 0, NULL, &apm_file_ops);
+
+diff -urNp linux-2.6.31.7/arch/x86/kernel/asm-offsets_32.c linux-2.6.31.7/arch/x86/kernel/asm-offsets_32.c
+--- linux-2.6.31.7/arch/x86/kernel/asm-offsets_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/asm-offsets_32.c 2009-12-08 17:39:42.849658847 -0500
+@@ -115,6 +115,11 @@ void foo(void)
+ OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
+ OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
++#endif
++
+ #endif
+
+ #ifdef CONFIG_XEN
+diff -urNp linux-2.6.31.7/arch/x86/kernel/asm-offsets_64.c linux-2.6.31.7/arch/x86/kernel/asm-offsets_64.c
+--- linux-2.6.31.7/arch/x86/kernel/asm-offsets_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/asm-offsets_64.c 2009-12-08 17:39:42.849658847 -0500
+@@ -114,6 +114,7 @@ int main(void)
+ ENTRY(cr8);
+ BLANK();
+ #undef ENTRY
++ DEFINE(TSS_size, sizeof(struct tss_struct));
+ DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
+ BLANK();
+ DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/common.c linux-2.6.31.7/arch/x86/kernel/cpu/common.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/common.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/common.c 2009-12-08 17:39:42.850658648 -0500
+@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitcon
+
+ static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+
+-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
+-#ifdef CONFIG_X86_64
+- /*
+- * We need valid kernel segments for data and code in long mode too
+- * IRET will check the segment types kkeil 2000/10/28
+- * Also sysret mandates a special GDT layout
+- *
+- * TLS descriptors are currently at a different place compared to i386.
+- * Hopefully nobody expects them at a fixed place (Wine?)
+- */
+- [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
+- [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
+- [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
+- [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
+- [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
+- [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
+-#else
+- [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
+- [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
+- [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
+- [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
+- /*
+- * Segments used for calling PnP BIOS have byte granularity.
+- * They code segments and data segments have fixed 64k limits,
+- * the transfer segment sizes are set at run time.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
+- /* 16-bit code */
+- [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
+- /*
+- * The APM segments have byte granularity and their bases
+- * are set at run time. All have 64k limits.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
+- /* 16-bit code */
+- [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
+- /* data */
+- [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
+-
+- [GDT_ENTRY_ESPFIX_SS] = { { { 0x0000ffff, 0x00cf9200 } } },
+- [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+- GDT_STACK_CANARY_INIT
+-#endif
+-} };
+-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+-
+ static int __init x86_xsave_setup(char *s)
+ {
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+@@ -345,7 +291,7 @@ void switch_to_new_gdt(int cpu)
+ {
+ struct desc_ptr gdt_descr;
+
+- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ /* Reload the per-cpu base */
+@@ -799,6 +745,10 @@ static void __cpuinit identify_cpu(struc
+ /* Filter out anything that depends on CPUID levels we don't have */
+ filter_cpuid_features(c, true);
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ setup_clear_cpu_cap(X86_FEATURE_SEP);
++#endif
++
+ /* If the model name is still unset, do table lookup. */
+ if (!c->x86_model_id[0]) {
+ const char *p;
+@@ -982,7 +932,7 @@ static __init int setup_disablecpuid(cha
+ __setup("clearcpuid=", setup_disablecpuid);
+
+ #ifdef CONFIG_X86_64
+-struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
++struct desc_ptr idt_descr __read_only = { 256 * 16 - 1, (unsigned long) idt_table };
+
+ DEFINE_PER_CPU_FIRST(union irq_stack_union,
+ irq_stack_union) __aligned(PAGE_SIZE);
+@@ -1092,7 +1042,7 @@ void __cpuinit cpu_init(void)
+ int i;
+
+ cpu = stack_smp_processor_id();
+- t = &per_cpu(init_tss, cpu);
++ t = init_tss + cpu;
+ orig_ist = &per_cpu(orig_ist, cpu);
+
+ #ifdef CONFIG_NUMA
+@@ -1190,7 +1140,7 @@ void __cpuinit cpu_init(void)
+ {
+ int cpu = smp_processor_id();
+ struct task_struct *curr = current;
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+ struct thread_struct *thread = &curr->thread;
+
+ if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c linux-2.6.31.7/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2009-12-08 17:39:42.851656749 -0500
+@@ -586,7 +586,7 @@ static const struct dmi_system_id sw_any
+ DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+ #endif
+
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c linux-2.6.31.7/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2009-12-08 17:39:42.851656749 -0500
+@@ -225,7 +225,7 @@ static struct cpu_model models[] =
+ { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
+ { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
+
+- { NULL, }
++ { NULL, NULL, 0, NULL}
+ };
+ #undef _BANIAS
+ #undef BANIAS
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/intel.c linux-2.6.31.7/arch/x86/kernel/cpu/intel.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/intel.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/intel.c 2009-12-08 17:39:42.852751150 -0500
+@@ -140,7 +140,7 @@ static void __cpuinit trap_init_f00f_bug
+ * Update the IDT descriptor and reload the IDT so that
+ * it uses the read-only mapped virtual address.
+ */
+- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
+ load_idt(&idt_descr);
+ }
+ #endif
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.31.7/arch/x86/kernel/cpu/intel_cacheinfo.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/intel_cacheinfo.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/intel_cacheinfo.c 2009-12-08 17:39:42.853654230 -0500
+@@ -842,7 +842,7 @@ static ssize_t store(struct kobject * ko
+ return ret;
+ }
+
+-static struct sysfs_ops sysfs_ops = {
++static const struct sysfs_ops sysfs_ops = {
+ .show = show,
+ .store = store,
+ };
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/Makefile linux-2.6.31.7/arch/x86/kernel/cpu/Makefile
+--- linux-2.6.31.7/arch/x86/kernel/cpu/Makefile 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/Makefile 2009-12-08 17:39:42.849658847 -0500
+@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
+ CFLAGS_REMOVE_common.o = -pg
+ endif
+
+-# Make sure load_percpu_segment has no stackprotector
+-nostackp := $(call cc-option, -fno-stack-protector)
+-CFLAGS_common.o := $(nostackp)
+-
+ obj-y := intel_cacheinfo.o addon_cpuid_features.o
+ obj-y += proc.o capflags.o powerflags.o common.o
+ obj-y += vmware.o hypervisor.o
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.31.7/arch/x86/kernel/cpu/mcheck/mce_amd.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/mcheck/mce_amd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/mcheck/mce_amd.c 2009-12-08 17:39:42.854653213 -0500
+@@ -388,7 +388,7 @@ static ssize_t store(struct kobject *kob
+ return ret;
+ }
+
+-static struct sysfs_ops threshold_ops = {
++static const struct sysfs_ops threshold_ops = {
+ .show = show,
+ .store = store,
+ };
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.31.7/arch/x86/kernel/cpu/mcheck/mce.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/mcheck/mce.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/mcheck/mce.c 2009-12-08 17:39:42.854653213 -0500
+@@ -1370,14 +1370,14 @@ void __cpuinit mcheck_init(struct cpuinf
+ */
+
+ static DEFINE_SPINLOCK(mce_state_lock);
+-static int open_count; /* #times opened */
++static atomic_t open_count; /* #times opened */
+ static int open_exclu; /* already open exclusive? */
+
+ static int mce_open(struct inode *inode, struct file *file)
+ {
+ spin_lock(&mce_state_lock);
+
+- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
++ if (open_exclu || (atomic_read(&open_count) && (file->f_flags & O_EXCL))) {
+ spin_unlock(&mce_state_lock);
+
+ return -EBUSY;
+@@ -1385,7 +1385,7 @@ static int mce_open(struct inode *inode,
+
+ if (file->f_flags & O_EXCL)
+ open_exclu = 1;
+- open_count++;
++ atomic_inc(&open_count);
+
+ spin_unlock(&mce_state_lock);
+
+@@ -1396,7 +1396,7 @@ static int mce_release(struct inode *ino
+ {
+ spin_lock(&mce_state_lock);
+
+- open_count--;
++ atomic_dec(&open_count);
+ open_exclu = 0;
+
+ spin_unlock(&mce_state_lock);
+@@ -1536,6 +1536,7 @@ static struct miscdevice mce_log_device
+ MISC_MCELOG_MINOR,
+ "mcelog",
+ &mce_chrdev_ops,
++ {NULL, NULL}, NULL, NULL
+ };
+
+ /*
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/amd.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/amd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/amd.c 2009-12-08 17:39:42.855653926 -0500
+@@ -101,7 +101,7 @@ static int amd_validate_add_page(unsigne
+ return 0;
+ }
+
+-static struct mtrr_ops amd_mtrr_ops = {
++static const struct mtrr_ops amd_mtrr_ops = {
+ .vendor = X86_VENDOR_AMD,
+ .set = amd_set_mtrr,
+ .get = amd_get_mtrr,
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/centaur.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/centaur.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/centaur.c 2009-12-08 17:39:42.855653926 -0500
+@@ -205,7 +205,7 @@ static int centaur_validate_add_page(uns
+ return 0;
+ }
+
+-static struct mtrr_ops centaur_mtrr_ops = {
++static const struct mtrr_ops centaur_mtrr_ops = {
+ .vendor = X86_VENDOR_CENTAUR,
+ // .init = centaur_mcr_init,
+ .set = centaur_set_mcr,
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/cyrix.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/cyrix.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/cyrix.c 2009-12-08 17:39:42.855653926 -0500
+@@ -254,7 +254,7 @@ static void cyrix_set_all(void)
+ post_set();
+ }
+
+-static struct mtrr_ops cyrix_mtrr_ops = {
++static const struct mtrr_ops cyrix_mtrr_ops = {
+ .vendor = X86_VENDOR_CYRIX,
+ // .init = cyrix_arr_init,
+ .set_all = cyrix_set_all,
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/generic.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/generic.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/generic.c 2009-12-08 17:39:42.856635689 -0500
+@@ -23,14 +23,14 @@ static struct fixed_range_block fixed_ra
+ { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
+ { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
+ { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */
+- {}
++ { 0, 0 }
+ };
+
+ static unsigned long smp_changes_mask;
+ static int mtrr_state_set;
+ u64 mtrr_tom2;
+
+-struct mtrr_state_type mtrr_state = {};
++struct mtrr_state_type mtrr_state;
+ EXPORT_SYMBOL_GPL(mtrr_state);
+
+ /**
+@@ -718,7 +718,7 @@ int positive_have_wrcomb(void)
+
+ /* generic structure...
+ */
+-struct mtrr_ops generic_mtrr_ops = {
++const struct mtrr_ops generic_mtrr_ops = {
+ .use_intel_if = 1,
+ .set_all = generic_set_all,
+ .get = generic_get_mtrr,
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/main.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/main.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/main.c 2009-12-08 17:39:42.857654730 -0500
+@@ -54,14 +54,14 @@ static DEFINE_MUTEX(mtrr_mutex);
+
+ u64 size_or_mask, size_and_mask;
+
+-static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
++static const struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] __read_only;
+
+-struct mtrr_ops * mtrr_if = NULL;
++const struct mtrr_ops * mtrr_if = NULL;
+
+ static void set_mtrr(unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type);
+
+-void set_mtrr_ops(struct mtrr_ops * ops)
++void set_mtrr_ops(const struct mtrr_ops * ops)
+ {
+ if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
+ mtrr_ops[ops->vendor] = ops;
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/mtrr.h
+--- linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/mtrr.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/mtrr/mtrr.h 2009-12-08 17:39:42.857654730 -0500
+@@ -33,7 +33,7 @@ extern int generic_get_free_region(unsig
+ extern int generic_validate_add_page(unsigned long base, unsigned long size,
+ unsigned int type);
+
+-extern struct mtrr_ops generic_mtrr_ops;
++extern const struct mtrr_ops generic_mtrr_ops;
+
+ extern int positive_have_wrcomb(void);
+
+@@ -54,10 +54,10 @@ void fill_mtrr_var_range(unsigned int in
+ u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
+ void get_mtrr_state(void);
+
+-extern void set_mtrr_ops(struct mtrr_ops * ops);
++extern void set_mtrr_ops(const struct mtrr_ops * ops);
+
+ extern u64 size_or_mask, size_and_mask;
+-extern struct mtrr_ops * mtrr_if;
++extern const struct mtrr_ops * mtrr_if;
+
+ #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
+ #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/perf_counter.c linux-2.6.31.7/arch/x86/kernel/cpu/perf_counter.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/perf_counter.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/perf_counter.c 2009-12-08 17:39:42.858653131 -0500
+@@ -1920,7 +1920,7 @@ perf_callchain_user(struct pt_regs *regs
+ break;
+
+ callchain_store(entry, frame.return_address);
+- fp = frame.next_frame;
++ fp = (__force const void __user *)frame.next_frame;
+ }
+ }
+
+diff -urNp linux-2.6.31.7/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.31.7/arch/x86/kernel/cpu/perfctr-watchdog.c
+--- linux-2.6.31.7/arch/x86/kernel/cpu/perfctr-watchdog.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/cpu/perfctr-watchdog.c 2009-12-08 17:39:42.858653131 -0500
+@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
+
+ /* Interface defining a CPU specific perfctr watchdog */
+ struct wd_ops {
+- int (*reserve)(void);
+- void (*unreserve)(void);
+- int (*setup)(unsigned nmi_hz);
+- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
+- void (*stop)(void);
++ int (* const reserve)(void);
++ void (* const unreserve)(void);
++ int (* const setup)(unsigned nmi_hz);
++ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
++ void (* const stop)(void);
+ unsigned perfctr;
+ unsigned evntsel;
+ u64 checkbit;
+diff -urNp linux-2.6.31.7/arch/x86/kernel/crash.c linux-2.6.31.7/arch/x86/kernel/crash.c
+--- linux-2.6.31.7/arch/x86/kernel/crash.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/crash.c 2009-12-08 17:39:42.859659181 -0500
+@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
+ regs = args->regs;
+
+ #ifdef CONFIG_X86_32
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ crash_fixup_ss_esp(&fixed_regs, regs);
+ regs = &fixed_regs;
+ }
+diff -urNp linux-2.6.31.7/arch/x86/kernel/doublefault_32.c linux-2.6.31.7/arch/x86/kernel/doublefault_32.c
+--- linux-2.6.31.7/arch/x86/kernel/doublefault_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/doublefault_32.c 2009-12-08 17:39:42.859659181 -0500
+@@ -11,7 +11,7 @@
+
+ #define DOUBLEFAULT_STACKSIZE (1024)
+ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
+-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
+
+ #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
+
+@@ -21,7 +21,7 @@ static void doublefault_fn(void)
+ unsigned long gdt, tss;
+
+ store_gdt(&gdt_desc);
+- gdt = gdt_desc.address;
++ gdt = (unsigned long)gdt_desc.address;
+
+ printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+
+@@ -60,10 +60,10 @@ struct tss_struct doublefault_tss __cach
+ /* 0x2 bit is always set */
+ .flags = X86_EFLAGS_SF | 0x2,
+ .sp = STACK_START,
+- .es = __USER_DS,
++ .es = __KERNEL_DS,
+ .cs = __KERNEL_CS,
+ .ss = __KERNEL_DS,
+- .ds = __USER_DS,
++ .ds = __KERNEL_DS,
+ .fs = __KERNEL_PERCPU,
+
+ .__cr3 = __pa_nodebug(swapper_pg_dir),
+diff -urNp linux-2.6.31.7/arch/x86/kernel/dumpstack_32.c linux-2.6.31.7/arch/x86/kernel/dumpstack_32.c
+--- linux-2.6.31.7/arch/x86/kernel/dumpstack_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/dumpstack_32.c 2009-12-08 17:39:42.860655418 -0500
+@@ -113,11 +113,12 @@ void show_registers(struct pt_regs *regs
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned int code_prologue = code_bytes * 43 / 64;
+ unsigned int code_len = code_bytes;
+ unsigned char c;
+ u8 *ip;
++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
+
+ printk(KERN_EMERG "Stack:\n");
+ show_stack_log_lvl(NULL, regs, &regs->sp,
+@@ -125,10 +126,10 @@ void show_registers(struct pt_regs *regs
+
+ printk(KERN_EMERG "Code: ");
+
+- ip = (u8 *)regs->ip - code_prologue;
++ ip = (u8 *)regs->ip - code_prologue + cs_base;
+ if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+ /* try starting at IP */
+- ip = (u8 *)regs->ip;
++ ip = (u8 *)regs->ip + cs_base;
+ code_len = code_len - code_prologue + 1;
+ }
+ for (i = 0; i < code_len; i++, ip++) {
+@@ -137,7 +138,7 @@ void show_registers(struct pt_regs *regs
+ printk(" Bad EIP value.");
+ break;
+ }
+- if (ip == (u8 *)regs->ip)
++ if (ip == (u8 *)regs->ip + cs_base)
+ printk("<%02x> ", c);
+ else
+ printk("%02x ", c);
+@@ -150,6 +151,7 @@ int is_valid_bugaddr(unsigned long ip)
+ {
+ unsigned short ud2;
+
++ ip = ktla_ktva(ip);
+ if (ip < PAGE_OFFSET)
+ return 0;
+ if (probe_kernel_address((unsigned short *)ip, ud2))
+diff -urNp linux-2.6.31.7/arch/x86/kernel/dumpstack.c linux-2.6.31.7/arch/x86/kernel/dumpstack.c
+--- linux-2.6.31.7/arch/x86/kernel/dumpstack.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/dumpstack.c 2009-12-08 17:39:42.859659181 -0500
+@@ -181,7 +181,7 @@ void dump_stack(void)
+ #endif
+
+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -242,7 +242,7 @@ void __kprobes oops_end(unsigned long fl
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+- do_exit(signr);
++ do_group_exit(signr);
+ }
+
+ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+@@ -296,7 +296,7 @@ void die(const char *str, struct pt_regs
+ unsigned long flags = oops_begin();
+ int sig = SIGSEGV;
+
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ report_bug(regs->ip, regs);
+
+ if (__die(str, regs, err))
+diff -urNp linux-2.6.31.7/arch/x86/kernel/e820.c linux-2.6.31.7/arch/x86/kernel/e820.c
+--- linux-2.6.31.7/arch/x86/kernel/e820.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/e820.c 2009-12-08 17:39:42.860655418 -0500
+@@ -733,7 +733,10 @@ struct early_res {
+ };
+ static struct early_res early_res[MAX_EARLY_RES] __initdata = {
+ { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
+- {}
++#ifdef CONFIG_VM86
++ { PAGE_SIZE, ISA_START_ADDRESS, "V86 mode memory", 1 },
++#endif
++ { 0, 0, {0}, 0 }
+ };
+
+ static int __init find_overlapped_early(u64 start, u64 end)
+diff -urNp linux-2.6.31.7/arch/x86/kernel/efi_32.c linux-2.6.31.7/arch/x86/kernel/efi_32.c
+--- linux-2.6.31.7/arch/x86/kernel/efi_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/efi_32.c 2009-12-08 17:39:42.861658062 -0500
+@@ -38,70 +38,38 @@
+ */
+
+ static unsigned long efi_rt_eflags;
+-static pgd_t efi_bak_pg_dir_pointer[2];
++static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
+
+-void efi_call_phys_prelog(void)
++void __init efi_call_phys_prelog(void)
+ {
+- unsigned long cr4;
+- unsigned long temp;
+ struct desc_ptr gdt_descr;
+
+ local_irq_save(efi_rt_eflags);
+
+- /*
+- * If I don't have PAE, I should just duplicate two entries in page
+- * directory. If I have PAE, I just need to duplicate one entry in
+- * page directory.
+- */
+- cr4 = read_cr4_safe();
+
+- if (cr4 & X86_CR4_PAE) {
+- efi_bak_pg_dir_pointer[0].pgd =
+- swapper_pg_dir[pgd_index(0)].pgd;
+- swapper_pg_dir[0].pgd =
+- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
+- } else {
+- efi_bak_pg_dir_pointer[0].pgd =
+- swapper_pg_dir[pgd_index(0)].pgd;
+- efi_bak_pg_dir_pointer[1].pgd =
+- swapper_pg_dir[pgd_index(0x400000)].pgd;
+- swapper_pg_dir[pgd_index(0)].pgd =
+- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
+- temp = PAGE_OFFSET + 0x400000;
+- swapper_pg_dir[pgd_index(0x400000)].pgd =
+- swapper_pg_dir[pgd_index(temp)].pgd;
+- }
++ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
++ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+
+ /*
+ * After the lock is released, the original page table is restored.
+ */
+ __flush_tlb_all();
+
+- gdt_descr.address = __pa(get_cpu_gdt_table(0));
++ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ }
+
+-void efi_call_phys_epilog(void)
++void __init efi_call_phys_epilog(void)
+ {
+- unsigned long cr4;
+ struct desc_ptr gdt_descr;
+
+- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
++ gdt_descr.address = get_cpu_gdt_table(0);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+
+- cr4 = read_cr4_safe();
+-
+- if (cr4 & X86_CR4_PAE) {
+- swapper_pg_dir[pgd_index(0)].pgd =
+- efi_bak_pg_dir_pointer[0].pgd;
+- } else {
+- swapper_pg_dir[pgd_index(0)].pgd =
+- efi_bak_pg_dir_pointer[0].pgd;
+- swapper_pg_dir[pgd_index(0x400000)].pgd =
+- efi_bak_pg_dir_pointer[1].pgd;
+- }
++ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
+
+ /*
+ * After the lock is released, the original page table is restored.
+diff -urNp linux-2.6.31.7/arch/x86/kernel/efi_stub_32.S linux-2.6.31.7/arch/x86/kernel/efi_stub_32.S
+--- linux-2.6.31.7/arch/x86/kernel/efi_stub_32.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/efi_stub_32.S 2009-12-08 17:39:42.861658062 -0500
+@@ -6,6 +6,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/init.h>
+ #include <asm/page_types.h>
+
+ /*
+@@ -20,7 +21,7 @@
+ * service functions will comply with gcc calling convention, too.
+ */
+
+-.text
++__INIT
+ ENTRY(efi_call_phys)
+ /*
+ * 0. The function can only be called in Linux kernel. So CS has been
+@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
+ * The mapping of lower virtual memory has been created in prelog and
+ * epilog.
+ */
+- movl $1f, %edx
+- subl $__PAGE_OFFSET, %edx
+- jmp *%edx
++ jmp 1f-__PAGE_OFFSET
+ 1:
+
+ /*
+@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
+ * parameter 2, ..., param n. To make things easy, we save the return
+ * address of efi_call_phys in a global variable.
+ */
+- popl %edx
+- movl %edx, saved_return_addr
+- /* get the function pointer into ECX*/
+- popl %ecx
+- movl %ecx, efi_rt_function_ptr
+- movl $2f, %edx
+- subl $__PAGE_OFFSET, %edx
+- pushl %edx
++ popl (saved_return_addr)
++ popl (efi_rt_function_ptr)
+
+ /*
+ * 3. Clear PG bit in %CR0.
+@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
+ /*
+ * 5. Call the physical function.
+ */
+- jmp *%ecx
++ call *(efi_rt_function_ptr-__PAGE_OFFSET)
+
+-2:
+ /*
+ * 6. After EFI runtime service returns, control will return to
+ * following instruction. We'd better readjust stack pointer first.
+@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
+ movl %cr0, %edx
+ orl $0x80000000, %edx
+ movl %edx, %cr0
+- jmp 1f
+-1:
++
+ /*
+ * 8. Now restore the virtual mode from flat mode by
+ * adding EIP with PAGE_OFFSET.
+ */
+- movl $1f, %edx
+- jmp *%edx
++ jmp 1f+__PAGE_OFFSET
+ 1:
+
+ /*
+ * 9. Balance the stack. And because EAX contain the return value,
+ * we'd better not clobber it.
+ */
+- leal efi_rt_function_ptr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
++ pushl (efi_rt_function_ptr)
+
+ /*
+- * 10. Push the saved return address onto the stack and return.
++ * 10. Return to the saved return address.
+ */
+- leal saved_return_addr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
+- ret
++ jmpl *(saved_return_addr)
+ ENDPROC(efi_call_phys)
+ .previous
+
+-.data
++__INITDATA
+ saved_return_addr:
+ .long 0
+ efi_rt_function_ptr:
+diff -urNp linux-2.6.31.7/arch/x86/kernel/entry_32.S linux-2.6.31.7/arch/x86/kernel/entry_32.S
+--- linux-2.6.31.7/arch/x86/kernel/entry_32.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/entry_32.S 2009-12-08 17:39:42.861658062 -0500
+@@ -191,7 +191,7 @@
+
+ #endif /* CONFIG_X86_32_LAZY_GS */
+
+-.macro SAVE_ALL
++.macro __SAVE_ALL _DS
+ cld
+ PUSH_GS
+ pushl %fs
+@@ -224,7 +224,7 @@
+ pushl %ebx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ebx, 0
+- movl $(__USER_DS), %edx
++ movl $\_DS, %edx
+ movl %edx, %ds
+ movl %edx, %es
+ movl $(__KERNEL_PERCPU), %edx
+@@ -232,6 +232,15 @@
+ SET_KERNEL_GS %edx
+ .endm
+
++.macro SAVE_ALL
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ __SAVE_ALL __KERNEL_DS
++ PAX_ENTER_KERNEL
++#else
++ __SAVE_ALL __USER_DS
++#endif
++.endm
++
+ .macro RESTORE_INT_REGS
+ popl %ebx
+ CFI_ADJUST_CFA_OFFSET -4
+@@ -352,7 +361,15 @@ check_userspace:
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
+ cmpl $USER_RPL, %eax
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jae resume_userspace
++
++ PAX_EXIT_KERNEL
++ jmp resume_kernel
++#else
+ jb resume_kernel # not returning to v8086 or userspace
++#endif
+
+ ENTRY(resume_userspace)
+ LOCKDEP_SYS_EXIT
+@@ -414,10 +431,9 @@ sysenter_past_esp:
+ /*CFI_REL_OFFSET cs, 0*/
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+- * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ */
+- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++ GET_THREAD_INFO(%ebp)
++ pushl TI_sysenter_return(%ebp)
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET eip, 0
+
+@@ -430,9 +446,19 @@ sysenter_past_esp:
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
++ movl PT_OLDESP(%esp),%ebp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov PT_OLDSS(%esp),%ds
++1: movl %ds:(%ebp),%ebp
++ push %ss
++ pop %ds
++#else
+ cmpl $__PAGE_OFFSET-3,%ebp
+ jae syscall_fault
+ 1: movl (%ebp),%ebp
++#endif
++
+ movl %ebp,PT_EBP(%esp)
+ .section __ex_table,"a"
+ .align 4
+@@ -455,12 +481,23 @@ sysenter_do_call:
+ testl $_TIF_ALLWORK_MASK, %ecx
+ jne sysexit_audit
+ sysenter_exit:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call pax_randomize_kstack
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++#endif
++
+ /* if something modifies registers it must also disable sysexit */
+ movl PT_EIP(%esp), %edx
+ movl PT_OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+ TRACE_IRQS_ON
+ 1: mov PT_FS(%esp), %fs
++2: mov PT_DS(%esp), %ds
++3: mov PT_ES(%esp), %es
+ PTGS_TO_GS
+ ENABLE_INTERRUPTS_SYSEXIT
+
+@@ -504,11 +541,17 @@ sysexit_audit:
+
+ CFI_ENDPROC
+ .pushsection .fixup,"ax"
+-2: movl $0,PT_FS(%esp)
++4: movl $0,PT_FS(%esp)
++ jmp 1b
++5: movl $0,PT_DS(%esp)
++ jmp 1b
++6: movl $0,PT_ES(%esp)
+ jmp 1b
+ .section __ex_table,"a"
+ .align 4
+- .long 1b,2b
++ .long 1b,4b
++ .long 2b,5b
++ .long 3b,6b
+ .popsection
+ PTGS_TO_GS_EX
+ ENDPROC(ia32_sysenter_target)
+@@ -538,6 +581,10 @@ syscall_exit:
+ testl $_TIF_ALLWORK_MASK, %ecx # current->work
+ jne syscall_exit_work
+
++#ifdef CONFIG_PAX_RANDKSTACK
++ call pax_randomize_kstack
++#endif
++
+ restore_all:
+ TRACE_IRQS_IRET
+ restore_all_notrace:
+@@ -602,7 +649,13 @@ ldt_ss:
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
+- PER_CPU(gdt_page, %ebx)
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ shr $16, %edx
+ mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
+ mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
+@@ -642,25 +695,19 @@ work_resched:
+
+ work_notifysig: # deal with pending signals and
+ # notify-resume requests
++ movl %esp, %eax
+ #ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
+- movl %esp, %eax
+- jne work_notifysig_v86 # returning to kernel-space or
++ jz 1f # returning to kernel-space or
+ # vm86-space
+- xorl %edx, %edx
+- call do_notify_resume
+- jmp resume_userspace_sig
+
+- ALIGN
+-work_notifysig_v86:
+ pushl %ecx # save ti_flags for do_notify_resume
+ CFI_ADJUST_CFA_OFFSET 4
+ call save_v86_state # %eax contains pt_regs pointer
+ popl %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ movl %eax, %esp
+-#else
+- movl %esp, %eax
++1:
+ #endif
+ xorl %edx, %edx
+ call do_notify_resume
+@@ -695,6 +742,10 @@ END(syscall_exit_work)
+
+ RING0_INT_FRAME # can't unwind into user space anyway
+ syscall_fault:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ push %ss
++ pop %ds
++#endif
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT,PT_EAX(%esp)
+ jmp resume_userspace
+@@ -735,7 +786,13 @@ PTREGSCALL(vm86old)
+ * normal stack and adjusts ESP with the matching offset.
+ */
+ /* fixup the stack */
+- PER_CPU(gdt_page, %ebx)
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
+ mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
+ shl $16, %eax
+@@ -1198,7 +1255,6 @@ return_to_handler:
+ ret
+ #endif
+
+-.section .rodata,"a"
+ #include "syscall_table_32.S"
+
+ syscall_table_size=(.-sys_call_table)
+@@ -1250,12 +1306,15 @@ error_code:
+ movl %ecx, %fs
+ UNWIND_ESPFIX_STACK
+ GS_TO_REG %ecx
++
++ PAX_ENTER_KERNEL
++
+ movl PT_GS(%esp), %edi # get the function address
+ movl PT_ORIG_EAX(%esp), %edx # get the error code
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+ REG_TO_PTGS %ecx
+ SET_KERNEL_GS %ecx
+- movl $(__USER_DS), %ecx
++ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
+ TRACE_IRQS_OFF
+@@ -1351,6 +1410,9 @@ nmi_stack_correct:
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_nmi
++
++ PAX_EXIT_KERNEL
++
+ jmp restore_all_notrace
+ CFI_ENDPROC
+
+@@ -1391,6 +1453,9 @@ nmi_espfix_stack:
+ FIXUP_ESPFIX_STACK # %eax == %esp
+ xorl %edx,%edx # zero error code
+ call do_nmi
++
++ PAX_EXIT_KERNEL
++
+ RESTORE_REGS
+ lss 12+4(%esp), %esp # back to espfix stack
+ CFI_ADJUST_CFA_OFFSET -24
+diff -urNp linux-2.6.31.7/arch/x86/kernel/entry_64.S linux-2.6.31.7/arch/x86/kernel/entry_64.S
+--- linux-2.6.31.7/arch/x86/kernel/entry_64.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/entry_64.S 2009-12-08 17:39:42.862668325 -0500
+@@ -1074,7 +1074,12 @@ ENTRY(\sym)
+ TRACE_IRQS_OFF
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+- PER_CPU(init_tss, %rbp)
++#ifdef CONFIG_SMP
++ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
++ lea init_tss(%rbp), %rbp
++#else
++ lea init_tss(%rip), %rbp
++#endif
+ subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
+ call \do_sym
+ addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
+diff -urNp linux-2.6.31.7/arch/x86/kernel/ftrace.c linux-2.6.31.7/arch/x86/kernel/ftrace.c
+--- linux-2.6.31.7/arch/x86/kernel/ftrace.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/ftrace.c 2009-12-08 17:39:42.863659728 -0500
+@@ -284,9 +284,9 @@ int ftrace_update_ftrace_func(ftrace_fun
+ unsigned char old[MCOUNT_INSN_SIZE], *new;
+ int ret;
+
+- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
++ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, (unsigned long)func);
+- ret = ftrace_modify_code(ip, old, new);
++ ret = ftrace_modify_code(ktla_ktva(ip), old, new);
+
+ return ret;
+ }
+diff -urNp linux-2.6.31.7/arch/x86/kernel/head32.c linux-2.6.31.7/arch/x86/kernel/head32.c
+--- linux-2.6.31.7/arch/x86/kernel/head32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/head32.c 2009-12-08 17:39:42.863659728 -0500
+@@ -13,12 +13,13 @@
+ #include <asm/e820.h>
+ #include <asm/bios_ebda.h>
+ #include <asm/trampoline.h>
++#include <asm/boot.h>
+
+ void __init i386_start_kernel(void)
+ {
+ reserve_trampoline_memory();
+
+- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
++ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ /* Reserve INITRD */
+diff -urNp linux-2.6.31.7/arch/x86/kernel/head_32.S linux-2.6.31.7/arch/x86/kernel/head_32.S
+--- linux-2.6.31.7/arch/x86/kernel/head_32.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/head_32.S 2009-12-08 17:39:42.864658452 -0500
+@@ -19,10 +19,17 @@
+ #include <asm/setup.h>
+ #include <asm/processor-flags.h>
+ #include <asm/percpu.h>
++#include <asm/msr-index.h>
+
+ /* Physical address */
+ #define pa(X) ((X) - __PAGE_OFFSET)
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) ((X) - __PAGE_OFFSET)
++#endif
++
+ /*
+ * References to members of the new_cpu_data structure.
+ */
+@@ -52,11 +59,7 @@
+ * and small than max_low_pfn, otherwise will waste some page table entries
+ */
+
+-#if PTRS_PER_PMD > 1
+-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
+-#else
+-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+-#endif
++#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
+
+ /* Enough space to fit pagetables for the low memory linear map */
+ MAPPING_BEYOND_END = \
+@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
+ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+
+ /*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
+ * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
+ * %esi points to the real-mode code as a 32-bit pointer.
+ * CS and DS must be 4 GB flat segments, but we don't depend on
+@@ -80,6 +89,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+ * can.
+ */
+ .section .text.head,"ax",@progbits
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jmp startup_32
++/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
++.fill PAGE_SIZE-5,1,0xcc
++#endif
++
+ ENTRY(startup_32)
+ /* test KEEP_SEGMENTS flag to see if the bootloader is asking
+ us to not reload segments */
+@@ -97,6 +113,52 @@ ENTRY(startup_32)
+ movl %eax,%gs
+ 2:
+
++#ifdef CONFIG_SMP
++ movl $pa(cpu_gdt_table),%edi
++ movl $__per_cpu_load,%eax
++ movw %ax,__KERNEL_PERCPU + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_PERCPU + 4(%edi)
++ movb %ah,__KERNEL_PERCPU + 7(%edi)
++ movl $__per_cpu_end - 1,%eax
++ subl $__per_cpu_start,%eax
++ movw %ax,__KERNEL_PERCPU + 0(%edi)
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ movl $NR_CPUS,%ecx
++ movl $pa(cpu_gdt_table),%edi
++1:
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
++ addl $PAGE_SIZE_asm,%edi
++ loop 1b
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ movl $pa(boot_gdt),%edi
++ movl $__LOAD_PHYSICAL_ADDR,%eax
++ movw %ax,__BOOT_CS + 2(%edi)
++ rorl $16,%eax
++ movb %al,__BOOT_CS + 4(%edi)
++ movb %ah,__BOOT_CS + 7(%edi)
++ rorl $16,%eax
++
++ ljmp $(__BOOT_CS),$1f
++1:
++
++ movl $NR_CPUS,%ecx
++ movl $pa(cpu_gdt_table),%edi
++ addl $__PAGE_OFFSET,%eax
++1:
++ movw %ax,__KERNEL_CS + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_CS + 4(%edi)
++ movb %ah,__KERNEL_CS + 7(%edi)
++ rorl $16,%eax
++ addl $PAGE_SIZE_asm,%edi
++ loop 1b
++#endif
++
+ /*
+ * Clear BSS first so that there are no surprises...
+ */
+@@ -140,9 +202,7 @@ ENTRY(startup_32)
+ cmpl $num_subarch_entries, %eax
+ jae bad_subarch
+
+- movl pa(subarch_entries)(,%eax,4), %eax
+- subl $__PAGE_OFFSET, %eax
+- jmp *%eax
++ jmp *pa(subarch_entries)(,%eax,4)
+
+ bad_subarch:
+ WEAK(lguest_entry)
+@@ -154,9 +214,9 @@ WEAK(xen_entry)
+ __INITDATA
+
+ subarch_entries:
+- .long default_entry /* normal x86/PC */
+- .long lguest_entry /* lguest hypervisor */
+- .long xen_entry /* Xen hypervisor */
++ .long ta(default_entry) /* normal x86/PC */
++ .long ta(lguest_entry) /* lguest hypervisor */
++ .long ta(xen_entry) /* Xen hypervisor */
+ num_subarch_entries = (. - subarch_entries) / 4
+ .previous
+ #endif /* CONFIG_PARAVIRT */
+@@ -217,8 +277,11 @@ default_entry:
+ movl %eax, pa(max_pfn_mapped)
+
+ /* Do early initialization of the fixmap area */
+- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
++#ifdef CONFIG_COMPAT_VDSO
++ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
++#else
++ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
++#endif
+ #else /* Not PAE */
+
+ page_pde_offset = (__PAGE_OFFSET >> 20);
+@@ -248,8 +311,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+ movl %eax, pa(max_pfn_mapped)
+
+ /* Do early initialization of the fixmap area */
+- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(swapper_pg_dir+0xffc)
++#ifdef CONFIG_COMPAT_VDSO
++ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
++#else
++ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
++#endif
+ #endif
+ jmp 3f
+ /*
+@@ -296,6 +362,7 @@ ENTRY(startup_32_smp)
+ orl %edx,%eax
+ movl %eax,%cr4
+
++#ifdef CONFIG_X86_PAE
+ btl $5, %eax # check if PAE is enabled
+ jnc 6f
+
+@@ -311,13 +378,16 @@ ENTRY(startup_32_smp)
+ jnc 6f
+
+ /* Setup EFER (Extended Feature Enable Register) */
+- movl $0xc0000080, %ecx
++ movl $MSR_EFER, %ecx
+ rdmsr
+
+ btsl $11, %eax
+ /* Make changes effective */
+ wrmsr
+
++ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
++ movl $1,pa(nx_enabled)
++#endif
+ 6:
+
+ /*
+@@ -343,9 +413,7 @@ ENTRY(startup_32_smp)
+
+ #ifdef CONFIG_SMP
+ cmpb $0, ready
+- jz 1f /* Initial CPU cleans BSS */
+- jmp checkCPUtype
+-1:
++ jnz checkCPUtype /* Initial CPU cleans BSS */
+ #endif /* CONFIG_SMP */
+
+ /*
+@@ -423,7 +491,7 @@ is386: movl $2,%ecx # set MP
+ 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
+ movl %eax,%ss # after changing gdt.
+
+- movl $(__USER_DS),%eax # DS/ES contains default USER segment
++# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
+ movl %eax,%ds
+ movl %eax,%es
+
+@@ -437,8 +505,11 @@ is386: movl $2,%ecx # set MP
+ */
+ cmpb $0,ready
+ jne 1f
+- movl $per_cpu__gdt_page,%eax
++ movl $cpu_gdt_table,%eax
+ movl $per_cpu__stack_canary,%ecx
++#ifdef CONFIG_SMP
++ addl $__per_cpu_load,%ecx
++#endif
+ movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+ shrl $16, %ecx
+ movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+@@ -456,10 +527,6 @@ is386: movl $2,%ecx # set MP
+ #ifdef CONFIG_SMP
+ movb ready, %cl
+ movb $1, ready
+- cmpb $0,%cl # the first CPU calls start_kernel
+- je 1f
+- movl (stack_start), %esp
+-1:
+ #endif /* CONFIG_SMP */
+ jmp *(initial_code)
+
+@@ -545,22 +612,22 @@ early_page_fault:
+ jmp early_fault
+
+ early_fault:
+- cld
+ #ifdef CONFIG_PRINTK
++ cmpl $1,%ss:early_recursion_flag
++ je hlt_loop
++ incl %ss:early_recursion_flag
++ cld
+ pusha
+ movl $(__KERNEL_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+- cmpl $2,early_recursion_flag
+- je hlt_loop
+- incl early_recursion_flag
+ movl %cr2,%eax
+ pushl %eax
+ pushl %edx /* trapno */
+ pushl $fault_msg
+ call printk
++; call dump_stack
+ #endif
+- call dump_stack
+ hlt_loop:
+ hlt
+ jmp hlt_loop
+@@ -568,8 +635,11 @@ hlt_loop:
+ /* This is the default interrupt "handler" :-) */
+ ALIGN
+ ignore_int:
+- cld
+ #ifdef CONFIG_PRINTK
++ cmpl $2,%ss:early_recursion_flag
++ je hlt_loop
++ incl %ss:early_recursion_flag
++ cld
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+@@ -578,9 +648,6 @@ ignore_int:
+ movl $(__KERNEL_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+- cmpl $2,early_recursion_flag
+- je hlt_loop
+- incl early_recursion_flag
+ pushl 16(%esp)
+ pushl 24(%esp)
+ pushl 32(%esp)
+@@ -607,27 +674,37 @@ ENTRY(initial_code)
+ /*
+ * BSS section
+ */
+-.section ".bss.page_aligned","wa"
+- .align PAGE_SIZE_asm
+ #ifdef CONFIG_X86_PAE
++.section .swapper_pg_pmd,"a",@progbits
+ swapper_pg_pmd:
+ .fill 1024*KPMDS,4,0
+ #else
++.section .swapper_pg_dir,"a",@progbits
+ ENTRY(swapper_pg_dir)
+ .fill 1024,4,0
+ #endif
++
+ swapper_pg_fixmap:
+ .fill 1024,4,0
++
++.section .empty_zero_page,"a",@progbits
+ ENTRY(empty_zero_page)
+ .fill 4096,1,0
+
+ /*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++.section .idt,"a",@progbits
++ENTRY(idt_table)
++ .fill 256,8,0
++
++/*
+ * This starts the data section.
+ */
+ #ifdef CONFIG_X86_PAE
+-.section ".data.page_aligned","wa"
+- /* Page-aligned for the benefit of paravirt? */
+- .align PAGE_SIZE_asm
++.section .swapper_pg_dir,"a",@progbits
+ ENTRY(swapper_pg_dir)
+ .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
+ # if KPMDS == 3
+@@ -650,11 +727,12 @@ ENTRY(swapper_pg_dir)
+
+ .data
+ ENTRY(stack_start)
+- .long init_thread_union+THREAD_SIZE
++ .long init_thread_union+THREAD_SIZE-8
+ .long __BOOT_DS
+
+ ready: .byte 0
+
++.section .rodata,"a",@progbits
+ early_recursion_flag:
+ .long 0
+
+@@ -690,7 +768,7 @@ fault_msg:
+ .word 0 # 32 bit align gdt_desc.address
+ boot_gdt_descr:
+ .word __BOOT_DS+7
+- .long boot_gdt - __PAGE_OFFSET
++ .long pa(boot_gdt)
+
+ .word 0 # 32-bit align idt_desc.address
+ idt_descr:
+@@ -701,7 +779,7 @@ idt_descr:
+ .word 0 # 32 bit align gdt_desc.address
+ ENTRY(early_gdt_descr)
+ .word GDT_ENTRIES*8-1
+- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
++ .long cpu_gdt_table /* Overwritten for secondary CPUs */
+
+ /*
+ * The boot_gdt must mirror the equivalent in setup.S and is
+@@ -710,5 +788,59 @@ ENTRY(early_gdt_descr)
+ .align L1_CACHE_BYTES
+ ENTRY(boot_gdt)
+ .fill GDT_ENTRY_BOOT_CS,8,0
+- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
+- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
++ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
++
++ .align PAGE_SIZE_asm
++ENTRY(cpu_gdt_table)
++ .rept NR_CPUS
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0000000000000000 /* 0x0b reserved */
++ .quad 0x0000000000000000 /* 0x13 reserved */
++ .quad 0x0000000000000000 /* 0x1b reserved */
++ .quad 0x0000000000000000 /* 0x20 unused */
++ .quad 0x0000000000000000 /* 0x28 unused */
++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
++ .quad 0x0000000000000000 /* 0x4b reserved */
++ .quad 0x0000000000000000 /* 0x53 reserved */
++ .quad 0x0000000000000000 /* 0x5b reserved */
++
++ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
++
++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
++
++ /*
++ * Segments used for calling PnP BIOS have byte granularity.
++ * The code segments and data segments have fixed 64k limits,
++ * the transfer segment sizes are set at run time.
++ */
++ .quad 0x00409b000000ffff /* 0x90 32-bit code */
++ .quad 0x00009b000000ffff /* 0x98 16-bit code */
++ .quad 0x000093000000ffff /* 0xa0 16-bit data */
++ .quad 0x0000930000000000 /* 0xa8 16-bit data */
++ .quad 0x0000930000000000 /* 0xb0 16-bit data */
++
++ /*
++ * The APM segments have byte granularity and their bases
++ * are set at run time. All have 64k limits.
++ */
++ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
++ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x004093000000ffff /* 0xc8 APM DS data */
++
++ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
++ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
++ .quad 0x0040930000000018 /* 0xe0 - STACK_CANARY */
++ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
++ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
++
++ /* Be sure this is zeroed to avoid false validations in Xen */
++ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
++ .endr
+diff -urNp linux-2.6.31.7/arch/x86/kernel/head_64.S linux-2.6.31.7/arch/x86/kernel/head_64.S
+--- linux-2.6.31.7/arch/x86/kernel/head_64.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/head_64.S 2009-12-08 17:39:42.865660490 -0500
+@@ -38,6 +38,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
+ L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
+ L4_START_KERNEL = pgd_index(__START_KERNEL_map)
+ L3_START_KERNEL = pud_index(__START_KERNEL_map)
++L4_VMALLOC_START = pgd_index(VMALLOC_START)
++L3_VMALLOC_START = pud_index(VMALLOC_START)
++L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
++L3_VMEMMAP_START = pud_index(VMEMMAP_START)
+
+ .text
+ .section .text.head
+@@ -85,35 +89,22 @@ startup_64:
+ */
+ addq %rbp, init_level4_pgt + 0(%rip)
+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
+
+ addq %rbp, level3_ident_pgt + 0(%rip)
++ addq %rbp, level3_ident_pgt + 8(%rip)
++ addq %rbp, level3_ident_pgt + 16(%rip)
++ addq %rbp, level3_ident_pgt + 24(%rip)
+
+- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
+- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
++ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
+
+- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
+
+- /* Add an Identity mapping if I am above 1G */
+- leaq _text(%rip), %rdi
+- andq $PMD_PAGE_MASK, %rdi
+-
+- movq %rdi, %rax
+- shrq $PUD_SHIFT, %rax
+- andq $(PTRS_PER_PUD - 1), %rax
+- jz ident_complete
+-
+- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
+- leaq level3_ident_pgt(%rip), %rbx
+- movq %rdx, 0(%rbx, %rax, 8)
+-
+- movq %rdi, %rax
+- shrq $PMD_SHIFT, %rax
+- andq $(PTRS_PER_PMD - 1), %rax
+- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
+- leaq level2_spare_pgt(%rip), %rbx
+- movq %rdx, 0(%rbx, %rax, 8)
+-ident_complete:
++ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
++ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
+
+ /*
+ * Fixup the kernel text+data virtual addresses. Note that
+@@ -187,6 +178,10 @@ ENTRY(secondary_startup_64)
+ btl $20,%edi /* No Execute supported? */
+ jnc 1f
+ btsl $_EFER_NX, %eax
++ leaq init_level4_pgt(%rip), %rdi
++ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
++ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
++ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
+ 1: wrmsr /* Make changes effective */
+
+ /* Setup cr0 */
+@@ -262,16 +257,16 @@ ENTRY(secondary_startup_64)
+ .quad x86_64_start_kernel
+ ENTRY(initial_gs)
+ .quad INIT_PER_CPU_VAR(irq_stack_union)
+- __FINITDATA
+
+ ENTRY(stack_start)
+ .quad init_thread_union+THREAD_SIZE-8
+ .word 0
++ __FINITDATA
+
+ bad_address:
+ jmp bad_address
+
+- .section ".init.text","ax"
++ __INIT
+ #ifdef CONFIG_EARLY_PRINTK
+ .globl early_idt_handlers
+ early_idt_handlers:
+@@ -316,18 +311,23 @@ ENTRY(early_idt_handler)
+ #endif /* EARLY_PRINTK */
+ 1: hlt
+ jmp 1b
++ .previous
+
+ #ifdef CONFIG_EARLY_PRINTK
++ __INITDATA
+ early_recursion_flag:
+ .long 0
++ .previous
+
++ .section .rodata,"a",@progbits
+ early_idt_msg:
+ .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
+ early_idt_ripmsg:
+ .asciz "RIP %s\n"
+-#endif /* CONFIG_EARLY_PRINTK */
+ .previous
++#endif /* CONFIG_EARLY_PRINTK */
+
++ .section .rodata,"a",@progbits
+ #define NEXT_PAGE(name) \
+ .balign PAGE_SIZE; \
+ ENTRY(name)
+@@ -350,13 +350,31 @@ NEXT_PAGE(init_level4_pgt)
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMALLOC_START*8, 0
++ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
++ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_START_KERNEL*8, 0
+ /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+
+ NEXT_PAGE(level3_ident_pgt)
+ .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++#ifdef CONFIG_XEN
+ .fill 511,8,0
++#else
++ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
++ .quad level2_ident_pgt + 2*PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
++ .quad level2_ident_pgt + 3*PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
++ .fill 508,8,0
++#endif
++
++NEXT_PAGE(level3_vmalloc_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level3_vmemmap_pgt)
++ .fill L3_VMEMMAP_START,8,0
++ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+
+ NEXT_PAGE(level3_kernel_pgt)
+ .fill L3_START_KERNEL,8,0
+@@ -364,20 +382,23 @@ NEXT_PAGE(level3_kernel_pgt)
+ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+
++NEXT_PAGE(level2_vmemmap_pgt)
++ .fill 512,8,0
++
+ NEXT_PAGE(level2_fixmap_pgt)
+- .fill 506,8,0
+- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
+- .fill 5,8,0
++ .fill 507,8,0
++ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
++ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
++ .fill 4,8,0
+
+-NEXT_PAGE(level1_fixmap_pgt)
++NEXT_PAGE(level1_vsyscall_pgt)
+ .fill 512,8,0
+
+-NEXT_PAGE(level2_ident_pgt)
+- /* Since I easily can, map the first 1G.
++ /* Since I easily can, map the first 4G.
+ * Don't set NX because code runs from these pages.
+ */
+- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
++NEXT_PAGE(level2_ident_pgt)
++ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 4*PTRS_PER_PMD)
+
+ NEXT_PAGE(level2_kernel_pgt)
+ /*
+@@ -390,33 +411,49 @@ NEXT_PAGE(level2_kernel_pgt)
+ * If you want to increase this then increase MODULES_VADDR
+ * too.)
+ */
+- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
+- KERNEL_IMAGE_SIZE/PMD_SIZE)
+-
+-NEXT_PAGE(level2_spare_pgt)
+- .fill 512, 8, 0
++ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
+
+ #undef PMDS
+ #undef NEXT_PAGE
+
+- .data
++ .align PAGE_SIZE
++ENTRY(cpu_gdt_table)
++ .rept NR_CPUS
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
++ .quad 0x00cffb000000ffff /* __USER32_CS */
++ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
++ .quad 0x00affb000000ffff /* __USER_CS */
++ .quad 0x0 /* unused */
++ .quad 0,0 /* TSS */
++ .quad 0,0 /* LDT */
++ .quad 0,0,0 /* three TLS descriptors */
++ .quad 0x0000f40000000000 /* node/CPU stored in limit */
++ /* asm/segment.h:GDT_ENTRIES must match this */
++
++ /* zero the remaining page */
++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++ .endr
++
+ .align 16
+ .globl early_gdt_descr
+ early_gdt_descr:
+ .word GDT_ENTRIES*8-1
+ early_gdt_descr_base:
+- .quad INIT_PER_CPU_VAR(gdt_page)
++ .quad cpu_gdt_table
+
+ ENTRY(phys_base)
+ /* This must match the first entry in level2_kernel_pgt */
+ .quad 0x0000000000000000
+
+ #include "../../x86/xen/xen-head.S"
+-
+- .section .bss, "aw", @nobits
++
++ .section .rodata,"a",@progbits
+ .align L1_CACHE_BYTES
+ ENTRY(idt_table)
+- .skip IDT_ENTRIES * 16
++ .fill 512,8,0
+
+ .section .bss.page_aligned, "aw", @nobits
+ .align PAGE_SIZE
+diff -urNp linux-2.6.31.7/arch/x86/kernel/i386_ksyms_32.c linux-2.6.31.7/arch/x86/kernel/i386_ksyms_32.c
+--- linux-2.6.31.7/arch/x86/kernel/i386_ksyms_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/i386_ksyms_32.c 2009-12-08 17:39:42.865660490 -0500
+@@ -10,8 +10,12 @@
+ EXPORT_SYMBOL(mcount);
+ #endif
+
++EXPORT_SYMBOL_GPL(cpu_gdt_table);
++
+ /* Networking helper routines. */
+ EXPORT_SYMBOL(csum_partial_copy_generic);
++EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
++EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
+
+ EXPORT_SYMBOL(__get_user_1);
+ EXPORT_SYMBOL(__get_user_2);
+@@ -26,3 +30,7 @@ EXPORT_SYMBOL(strstr);
+
+ EXPORT_SYMBOL(csum_partial);
+ EXPORT_SYMBOL(empty_zero_page);
++
++#ifdef CONFIG_PAX_KERNEXEC
++EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
++#endif
+diff -urNp linux-2.6.31.7/arch/x86/kernel/init_task.c linux-2.6.31.7/arch/x86/kernel/init_task.c
+--- linux-2.6.31.7/arch/x86/kernel/init_task.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/init_task.c 2009-12-08 17:39:42.865660490 -0500
+@@ -39,5 +39,5 @@ EXPORT_SYMBOL(init_task);
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+-
++struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
++EXPORT_SYMBOL(init_tss);
+diff -urNp linux-2.6.31.7/arch/x86/kernel/ioport.c linux-2.6.31.7/arch/x86/kernel/ioport.c
+--- linux-2.6.31.7/arch/x86/kernel/ioport.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/ioport.c 2009-12-08 17:39:42.866660559 -0500
+@@ -6,6 +6,7 @@
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/ioport.h>
+@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
+
+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
+ return -EINVAL;
++#ifdef CONFIG_GRKERNSEC_IO
++ if (turn_on) {
++ gr_handle_ioperm();
++ return -EPERM;
++ }
++#endif
+ if (turn_on && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
+ * because the ->io_bitmap_max value must match the bitmap
+ * contents:
+ */
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+
+ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
+
+@@ -111,8 +118,13 @@ static int do_iopl(unsigned int level, s
+ return -EINVAL;
+ /* Trying to gain more privileges? */
+ if (level > old) {
++#ifdef CONFIG_GRKERNSEC_IO
++ gr_handle_iopl();
++ return -EPERM;
++#else
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
++#endif
+ }
+ regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
+
+diff -urNp linux-2.6.31.7/arch/x86/kernel/irq_32.c linux-2.6.31.7/arch/x86/kernel/irq_32.c
+--- linux-2.6.31.7/arch/x86/kernel/irq_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/irq_32.c 2009-12-08 17:39:42.866660559 -0500
+@@ -94,7 +94,7 @@ execute_on_irq_stack(int overflow, struc
+ return 0;
+
+ /* build the stack frame on the IRQ stack */
+- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
+ irqctx->tinfo.task = curctx->tinfo.task;
+ irqctx->tinfo.previous_esp = current_stack_pointer;
+
+@@ -175,7 +175,7 @@ asmlinkage void do_softirq(void)
+ irqctx->tinfo.previous_esp = current_stack_pointer;
+
+ /* build the stack frame on the softirq stack */
+- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
+
+ call_on_stack(__do_softirq, isp);
+ /*
+diff -urNp linux-2.6.31.7/arch/x86/kernel/kgdb.c linux-2.6.31.7/arch/x86/kernel/kgdb.c
+--- linux-2.6.31.7/arch/x86/kernel/kgdb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/kgdb.c 2009-12-08 17:39:42.866660559 -0500
+@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
+ return instruction_pointer(regs);
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: */
+ .gdb_bpt_instr = { 0xcc },
+ .flags = KGDB_HW_BREAKPOINT,
+diff -urNp linux-2.6.31.7/arch/x86/kernel/kprobes.c linux-2.6.31.7/arch/x86/kernel/kprobes.c
+--- linux-2.6.31.7/arch/x86/kernel/kprobes.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/kprobes.c 2009-12-08 17:39:42.867659667 -0500
+@@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
+ char op;
+ s32 raddr;
+ } __attribute__((packed)) * jop;
+- jop = (struct __arch_jmp_op *)from;
++
++ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
++
++ pax_open_kernel();
+ jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
+ jop->op = RELATIVEJUMP_INSTRUCTION;
++ pax_close_kernel();
+ }
+
+ /*
+@@ -345,16 +349,18 @@ static void __kprobes fix_riprel(struct
+
+ static void __kprobes arch_copy_kprobe(struct kprobe *p)
+ {
+- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
++ pax_open_kernel();
++ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
++ pax_close_kernel();
+
+ fix_riprel(p);
+
+- if (can_boost(p->addr))
++ if (can_boost(ktla_ktva(p->addr)))
+ p->ainsn.boostable = 0;
+ else
+ p->ainsn.boostable = -1;
+
+- p->opcode = *p->addr;
++ p->opcode = *(ktla_ktva(p->addr));
+ }
+
+ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+@@ -432,7 +438,7 @@ static void __kprobes prepare_singlestep
+ if (p->opcode == BREAKPOINT_INSTRUCTION)
+ regs->ip = (unsigned long)p->addr;
+ else
+- regs->ip = (unsigned long)p->ainsn.insn;
++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ }
+
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+@@ -453,7 +459,7 @@ static void __kprobes setup_singlestep(s
+ if (p->ainsn.boostable == 1 && !p->post_handler) {
+ /* Boost up -- we can execute copied instructions directly */
+ reset_current_kprobe();
+- regs->ip = (unsigned long)p->ainsn.insn;
++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ preempt_enable_no_resched();
+ return;
+ }
+@@ -523,7 +529,7 @@ static int __kprobes kprobe_handler(stru
+ struct kprobe_ctlblk *kcb;
+
+ addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
+- if (*addr != BREAKPOINT_INSTRUCTION) {
++ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+@@ -775,7 +781,7 @@ static void __kprobes resume_execution(s
+ struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+ {
+ unsigned long *tos = stack_addr(regs);
+- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
++ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ unsigned long orig_ip = (unsigned long)p->addr;
+ kprobe_opcode_t *insn = p->ainsn.insn;
+
+@@ -958,7 +964,7 @@ int __kprobes kprobe_exceptions_notify(s
+ struct die_args *args = data;
+ int ret = NOTIFY_DONE;
+
+- if (args->regs && user_mode_vm(args->regs))
++ if (args->regs && user_mode(args->regs))
+ return ret;
+
+ switch (val) {
+diff -urNp linux-2.6.31.7/arch/x86/kernel/ldt.c linux-2.6.31.7/arch/x86/kernel/ldt.c
+--- linux-2.6.31.7/arch/x86/kernel/ldt.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/ldt.c 2009-12-08 17:39:42.867659667 -0500
+@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
+ if (reload) {
+ #ifdef CONFIG_SMP
+ preempt_disable();
+- load_LDT(pc);
++ load_LDT_nolock(pc);
+ if (!cpus_equal(current->mm->cpu_vm_mask,
+ cpumask_of_cpu(smp_processor_id())))
+ smp_call_function(flush_ldt, current->mm, 1);
+ preempt_enable();
+ #else
+- load_LDT(pc);
++ load_LDT_nolock(pc);
+ #endif
+ }
+ if (oldsize) {
+@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
+ return err;
+
+ for (i = 0; i < old->size; i++)
+- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
++ write_ldt_entry(new->ldt, i, old->ldt + i);
+ return 0;
+ }
+
+@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
+ retval = copy_ldt(&mm->context, &old_mm->context);
+ mutex_unlock(&old_mm->context.lock);
+ }
++
++ if (tsk == current) {
++ mm->context.vdso = ~0UL;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ mm->context.user_cs_base = 0UL;
++ mm->context.user_cs_limit = ~0UL;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpus_clear(mm->context.cpu_user_cs_mask);
++#endif
++
++#endif
++#endif
++
++ }
++
+ return retval;
+ }
+
+@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
++ error = -EINVAL;
++ goto out_unlock;
++ }
++#endif
++
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
+diff -urNp linux-2.6.31.7/arch/x86/kernel/machine_kexec_32.c linux-2.6.31.7/arch/x86/kernel/machine_kexec_32.c
+--- linux-2.6.31.7/arch/x86/kernel/machine_kexec_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/machine_kexec_32.c 2009-12-08 17:39:42.868655303 -0500
+@@ -26,7 +26,7 @@
+ #include <asm/system.h>
+ #include <asm/cacheflush.h>
+
+-static void set_idt(void *newidt, __u16 limit)
++static void set_idt(struct desc_struct *newidt, __u16 limit)
+ {
+ struct desc_ptr curidt;
+
+@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
+ }
+
+
+-static void set_gdt(void *newgdt, __u16 limit)
++static void set_gdt(struct desc_struct *newgdt, __u16 limit)
+ {
+ struct desc_ptr curgdt;
+
+@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
+ }
+
+ control_page = page_address(image->control_code_page);
+- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
++ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
+
+ relocate_kernel_ptr = control_page;
+ page_list[PA_CONTROL_PAGE] = __pa(control_page);
+diff -urNp linux-2.6.31.7/arch/x86/kernel/microcode_amd.c linux-2.6.31.7/arch/x86/kernel/microcode_amd.c
+--- linux-2.6.31.7/arch/x86/kernel/microcode_amd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/microcode_amd.c 2009-12-08 17:39:42.868655303 -0500
+@@ -340,7 +340,7 @@ static void microcode_fini_cpu_amd(int c
+ uci->mc = NULL;
+ }
+
+-static struct microcode_ops microcode_amd_ops = {
++static const struct microcode_ops microcode_amd_ops = {
+ .request_microcode_user = request_microcode_user,
+ .request_microcode_fw = request_microcode_fw,
+ .collect_cpu_info = collect_cpu_info_amd,
+@@ -348,7 +348,7 @@ static struct microcode_ops microcode_am
+ .microcode_fini_cpu = microcode_fini_cpu_amd,
+ };
+
+-struct microcode_ops * __init init_amd_microcode(void)
++const struct microcode_ops * __init init_amd_microcode(void)
+ {
+ return &microcode_amd_ops;
+ }
+diff -urNp linux-2.6.31.7/arch/x86/kernel/microcode_core.c linux-2.6.31.7/arch/x86/kernel/microcode_core.c
+--- linux-2.6.31.7/arch/x86/kernel/microcode_core.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/microcode_core.c 2009-12-08 17:39:42.869657951 -0500
+@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
+
+ #define MICROCODE_VERSION "2.00"
+
+-static struct microcode_ops *microcode_ops;
++static const struct microcode_ops *microcode_ops;
+
+ /*
+ * Synchronization.
+diff -urNp linux-2.6.31.7/arch/x86/kernel/microcode_intel.c linux-2.6.31.7/arch/x86/kernel/microcode_intel.c
+--- linux-2.6.31.7/arch/x86/kernel/microcode_intel.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/microcode_intel.c 2009-12-08 17:39:42.869657951 -0500
+@@ -443,13 +443,13 @@ static enum ucode_state request_microcod
+
+ static int get_ucode_user(void *to, const void *from, size_t n)
+ {
+- return copy_from_user(to, from, n);
++ return copy_from_user(to, (__force const void __user *)from, n);
+ }
+
+ static enum ucode_state
+ request_microcode_user(int cpu, const void __user *buf, size_t size)
+ {
+- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
++ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
+ }
+
+ static void microcode_fini_cpu(int cpu)
+@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
+ uci->mc = NULL;
+ }
+
+-static struct microcode_ops microcode_intel_ops = {
++static const struct microcode_ops microcode_intel_ops = {
+ .request_microcode_user = request_microcode_user,
+ .request_microcode_fw = request_microcode_fw,
+ .collect_cpu_info = collect_cpu_info,
+@@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
+ .microcode_fini_cpu = microcode_fini_cpu,
+ };
+
+-struct microcode_ops * __init init_intel_microcode(void)
++const struct microcode_ops * __init init_intel_microcode(void)
+ {
+ return &microcode_intel_ops;
+ }
+diff -urNp linux-2.6.31.7/arch/x86/kernel/module.c linux-2.6.31.7/arch/x86/kernel/module.c
+--- linux-2.6.31.7/arch/x86/kernel/module.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/module.c 2009-12-08 17:39:42.870655612 -0500
+@@ -34,7 +34,7 @@
+ #define DEBUGP(fmt...)
+ #endif
+
+-void *module_alloc(unsigned long size)
++static void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
+ struct vm_struct *area;
+
+@@ -48,9 +48,90 @@ void *module_alloc(unsigned long size)
+ if (!area)
+ return NULL;
+
+- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
+- PAGE_KERNEL_EXEC);
++ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
++}
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++void *module_alloc(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL);
++}
++
++void *module_alloc_exec(unsigned long size)
++{
++ struct vm_struct *area;
++
++ if (size == 0)
++ return NULL;
++
++ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
++ if (area)
++ return area->addr;
++
++ return NULL;
++}
++EXPORT_SYMBOL(module_alloc_exec);
++
++void module_free_exec(struct module *mod, void *module_region)
++{
++ struct vm_struct **p, *tmp;
++
++ if (!module_region)
++ return;
++
++ if ((PAGE_SIZE-1) & (unsigned long)module_region) {
++ printk(KERN_ERR "Trying to module_free_exec() bad address (%p)\n", module_region);
++ WARN_ON(1);
++ return;
++ }
++
++ write_lock(&vmlist_lock);
++ for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next)
++ if (tmp->addr == module_region)
++ break;
++
++ if (tmp) {
++ pax_open_kernel();
++ memset(tmp->addr, 0xCC, tmp->size);
++ pax_close_kernel();
++
++ *p = tmp->next;
++ kfree(tmp);
++ }
++ write_unlock(&vmlist_lock);
++
++ if (!tmp) {
++ printk(KERN_ERR "Trying to module_free_exec() nonexistent vm area (%p)\n",
++ module_region);
++ WARN_ON(1);
++ }
++}
++EXPORT_SYMBOL(module_free_exec);
++#else
++void *module_alloc(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL);
++}
++
++void module_free_exec(struct module *mod, void *module_region)
++{
++ module_free(mod, module_region);
+ }
++EXPORT_SYMBOL(module_free_exec);
++
++void *module_alloc_exec(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL_RX);
++}
++EXPORT_SYMBOL(module_alloc_exec);
++#endif
++#else
++void *module_alloc(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL_EXEC);
++}
++#endif
+
+ /* Free memory returned from module_alloc */
+ void module_free(struct module *mod, void *module_region)
+@@ -77,14 +158,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ unsigned int i;
+ Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+- uint32_t *location;
++ uint32_t *plocation, location;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+- + rel[i].r_offset;
++ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
++ location = (uint32_t)plocation;
++ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
++ plocation = ktla_ktva((void *)plocation);
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+@@ -93,11 +176,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_386_32:
+ /* We add the value into the location given */
+- *location += sym->st_value;
++ pax_open_kernel();
++ *plocation += sym->st_value;
++ pax_close_kernel();
+ break;
+ case R_386_PC32:
+ /* Add the value, subtract its postition */
+- *location += sym->st_value - (uint32_t)location;
++ pax_open_kernel();
++ *plocation += sym->st_value - location;
++ pax_close_kernel();
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+@@ -153,21 +240,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
+ case R_X86_64_NONE:
+ break;
+ case R_X86_64_64:
++ pax_open_kernel();
+ *(u64 *)loc = val;
++ pax_close_kernel();
+ break;
+ case R_X86_64_32:
++ pax_open_kernel();
+ *(u32 *)loc = val;
++ pax_close_kernel();
+ if (val != *(u32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_32S:
++ pax_open_kernel();
+ *(s32 *)loc = val;
++ pax_close_kernel();
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_PC32:
+ val -= (u64)loc;
++ pax_open_kernel();
+ *(u32 *)loc = val;
++ pax_close_kernel();
++
+ #if 0
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+diff -urNp linux-2.6.31.7/arch/x86/kernel/paravirt.c linux-2.6.31.7/arch/x86/kernel/paravirt.c
+--- linux-2.6.31.7/arch/x86/kernel/paravirt.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/paravirt.c 2009-12-08 17:39:42.871659728 -0500
+@@ -54,7 +54,7 @@ u64 _paravirt_ident_64(u64 x)
+ return x;
+ }
+
+-static void __init default_banner(void)
++static void default_banner(void)
+ {
+ printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
+ pv_info.name);
+@@ -125,9 +125,9 @@ unsigned paravirt_patch_jmp(void *insnbu
+
+ /* Neat trick to map patch type back to the call within the
+ * corresponding structure. */
+-static void *get_call_destination(u8 type)
++static const void *get_call_destination(u8 type)
+ {
+- struct paravirt_patch_template tmpl = {
++ const struct paravirt_patch_template tmpl = {
+ .pv_init_ops = pv_init_ops,
+ .pv_time_ops = pv_time_ops,
+ .pv_cpu_ops = pv_cpu_ops,
+@@ -138,13 +138,13 @@ static void *get_call_destination(u8 typ
+ .pv_lock_ops = pv_lock_ops,
+ #endif
+ };
+- return *((void **)&tmpl + type);
++ return *((const void **)&tmpl + type);
+ }
+
+ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+ unsigned long addr, unsigned len)
+ {
+- void *opfunc = get_call_destination(type);
++ const void *opfunc = get_call_destination(type);
+ unsigned ret;
+
+ if (opfunc == NULL)
+@@ -183,7 +183,7 @@ unsigned paravirt_patch_insns(void *insn
+ if (insn_len > len || start == NULL)
+ insn_len = len;
+ else
+- memcpy(insnbuf, start, insn_len);
++ memcpy(insnbuf, ktla_ktva(start), insn_len);
+
+ return insn_len;
+ }
+@@ -311,21 +311,21 @@ void arch_flush_lazy_mmu_mode(void)
+ preempt_enable();
+ }
+
+-struct pv_info pv_info = {
++struct pv_info pv_info __read_only = {
+ .name = "bare hardware",
+ .paravirt_enabled = 0,
+ .kernel_rpl = 0,
+ .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
+ };
+
+-struct pv_init_ops pv_init_ops = {
++struct pv_init_ops pv_init_ops __read_only = {
+ .patch = native_patch,
+ .banner = default_banner,
+ .arch_setup = paravirt_nop,
+ .memory_setup = machine_specific_memory_setup,
+ };
+
+-struct pv_time_ops pv_time_ops = {
++struct pv_time_ops pv_time_ops __read_only = {
+ .time_init = hpet_time_init,
+ .get_wallclock = native_get_wallclock,
+ .set_wallclock = native_set_wallclock,
+@@ -333,7 +333,7 @@ struct pv_time_ops pv_time_ops = {
+ .get_tsc_khz = native_calibrate_tsc,
+ };
+
+-struct pv_irq_ops pv_irq_ops = {
++struct pv_irq_ops pv_irq_ops __read_only = {
+ .init_IRQ = native_init_IRQ,
+ .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+ .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+@@ -346,7 +346,7 @@ struct pv_irq_ops pv_irq_ops = {
+ #endif
+ };
+
+-struct pv_cpu_ops pv_cpu_ops = {
++struct pv_cpu_ops pv_cpu_ops __read_only = {
+ .cpuid = native_cpuid,
+ .get_debugreg = native_get_debugreg,
+ .set_debugreg = native_set_debugreg,
+@@ -406,7 +406,7 @@ struct pv_cpu_ops pv_cpu_ops = {
+ .end_context_switch = paravirt_nop,
+ };
+
+-struct pv_apic_ops pv_apic_ops = {
++struct pv_apic_ops pv_apic_ops __read_only = {
+ #ifdef CONFIG_X86_LOCAL_APIC
+ .setup_boot_clock = setup_boot_APIC_clock,
+ .setup_secondary_clock = setup_secondary_APIC_clock,
+@@ -422,7 +422,7 @@ struct pv_apic_ops pv_apic_ops = {
+ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+ #endif
+
+-struct pv_mmu_ops pv_mmu_ops = {
++struct pv_mmu_ops pv_mmu_ops __read_only = {
+ #ifndef CONFIG_X86_64
+ .pagetable_setup_start = native_pagetable_setup_start,
+ .pagetable_setup_done = native_pagetable_setup_done,
+@@ -500,6 +500,12 @@ struct pv_mmu_ops pv_mmu_ops = {
+ },
+
+ .set_fixmap = native_set_fixmap,
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .pax_open_kernel = native_pax_open_kernel,
++ .pax_close_kernel = native_pax_close_kernel,
++#endif
++
+ };
+
+ EXPORT_SYMBOL_GPL(pv_time_ops);
+diff -urNp linux-2.6.31.7/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.31.7/arch/x86/kernel/paravirt-spinlocks.c
+--- linux-2.6.31.7/arch/x86/kernel/paravirt-spinlocks.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/paravirt-spinlocks.c 2009-12-08 17:39:42.870655612 -0500
+@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
+ __raw_spin_lock(lock);
+ }
+
+-struct pv_lock_ops pv_lock_ops = {
++struct pv_lock_ops pv_lock_ops __read_only = {
+ #ifdef CONFIG_SMP
+ .spin_is_locked = __ticket_spin_is_locked,
+ .spin_is_contended = __ticket_spin_is_contended,
+diff -urNp linux-2.6.31.7/arch/x86/kernel/pci-dma.c linux-2.6.31.7/arch/x86/kernel/pci-dma.c
+--- linux-2.6.31.7/arch/x86/kernel/pci-dma.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/pci-dma.c 2009-12-08 17:39:42.883661585 -0500
+@@ -13,7 +13,7 @@
+
+ static int forbid_dac __read_mostly;
+
+-struct dma_map_ops *dma_ops;
++const struct dma_map_ops *dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ static int iommu_sac_force __read_mostly;
+@@ -234,7 +234,7 @@ early_param("iommu", iommu_setup);
+
+ int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ #ifdef CONFIG_PCI
+ if (mask > 0xffffffff && forbid_dac > 0) {
+diff -urNp linux-2.6.31.7/arch/x86/kernel/pci-nommu.c linux-2.6.31.7/arch/x86/kernel/pci-nommu.c
+--- linux-2.6.31.7/arch/x86/kernel/pci-nommu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/pci-nommu.c 2009-12-08 17:39:42.883661585 -0500
+@@ -79,7 +79,7 @@ static void nommu_free_coherent(struct d
+ free_pages((unsigned long)vaddr, get_order(size));
+ }
+
+-struct dma_map_ops nommu_dma_ops = {
++const struct dma_map_ops nommu_dma_ops = {
+ .alloc_coherent = dma_generic_alloc_coherent,
+ .free_coherent = nommu_free_coherent,
+ .map_sg = nommu_map_sg,
+diff -urNp linux-2.6.31.7/arch/x86/kernel/process_32.c linux-2.6.31.7/arch/x86/kernel/process_32.c
+--- linux-2.6.31.7/arch/x86/kernel/process_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/process_32.c 2009-12-08 17:39:42.884658580 -0500
+@@ -70,6 +70,7 @@ EXPORT_PER_CPU_SYMBOL(current_task);
+ unsigned long thread_saved_pc(struct task_struct *tsk)
+ {
+ return ((unsigned long *)tsk->thread.sp)[3];
++//XXX return tsk->thread.eip;
+ }
+
+ #ifndef CONFIG_SMP
+@@ -132,7 +133,7 @@ void __show_regs(struct pt_regs *regs, i
+ unsigned short ss, gs;
+ const char *board;
+
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ sp = regs->sp;
+ ss = regs->ss & 0xffff;
+ gs = get_user_gs(regs);
+@@ -213,8 +214,8 @@ int kernel_thread(int (*fn)(void *), voi
+ regs.bx = (unsigned long) fn;
+ regs.dx = (unsigned long) arg;
+
+- regs.ds = __USER_DS;
+- regs.es = __USER_DS;
++ regs.ds = __KERNEL_DS;
++ regs.es = __KERNEL_DS;
+ regs.fs = __KERNEL_PERCPU;
+ regs.gs = __KERNEL_STACK_CANARY;
+ regs.orig_ax = -1;
+@@ -250,7 +251,7 @@ int copy_thread(unsigned long clone_flag
+ struct task_struct *tsk;
+ int err;
+
+- childregs = task_pt_regs(p);
++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
+ *childregs = *regs;
+ childregs->ax = 0;
+ childregs->sp = sp;
+@@ -279,6 +280,7 @@ int copy_thread(unsigned long clone_flag
+ * Set a new TLS for the child thread?
+ */
+ if (clone_flags & CLONE_SETTLS)
++//XXX needs set_fs()?
+ err = do_set_thread_area(p, -1,
+ (struct user_desc __user *)childregs->si, 0);
+
+@@ -349,7 +351,7 @@ __switch_to(struct task_struct *prev_p,
+ struct thread_struct *prev = &prev_p->thread,
+ *next = &next_p->thread;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+@@ -377,6 +379,11 @@ __switch_to(struct task_struct *prev_p,
+ */
+ lazy_save_gs(prev->gs);
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!segment_eq(task_thread_info(prev_p)->addr_limit, task_thread_info(next_p)->addr_limit))
++ __set_fs(task_thread_info(next_p)->addr_limit, cpu);
++#endif
++
+ /*
+ * Load the per-thread Thread-Local Storage descriptor.
+ */
+@@ -495,3 +502,27 @@ unsigned long get_wchan(struct task_stru
+ return 0;
+ }
+
++#ifdef CONFIG_PAX_RANDKSTACK
++asmlinkage void pax_randomize_kstack(void)
++{
++ struct thread_struct *thread = &current->thread;
++ unsigned long time;
++
++ if (!randomize_va_space)
++ return;
++
++ rdtscl(time);
++
++ /* P4 seems to return a 0 LSB, ignore it */
++#ifdef CONFIG_MPENTIUM4
++ time &= 0x1EUL;
++ time <<= 2;
++#else
++ time &= 0xFUL;
++ time <<= 3;
++#endif
++
++ thread->sp0 ^= time;
++ load_sp0(init_tss + smp_processor_id(), thread);
++}
++#endif
+diff -urNp linux-2.6.31.7/arch/x86/kernel/process_64.c linux-2.6.31.7/arch/x86/kernel/process_64.c
+--- linux-2.6.31.7/arch/x86/kernel/process_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/process_64.c 2009-12-08 17:39:42.885668896 -0500
+@@ -94,7 +94,7 @@ static void __exit_idle(void)
+ void exit_idle(void)
+ {
+ /* idle loop has pid 0 */
+- if (current->pid)
++ if (task_pid_nr(current))
+ return;
+ __exit_idle();
+ }
+@@ -173,7 +173,7 @@ void __show_regs(struct pt_regs *regs, i
+ if (!board)
+ board = "";
+ printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version, board);
+@@ -384,7 +384,7 @@ __switch_to(struct task_struct *prev_p,
+ struct thread_struct *prev = &prev_p->thread;
+ struct thread_struct *next = &next_p->thread;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+ unsigned fsindex, gsindex;
+
+ /* we're going to use this soon, after a few expensive things */
+@@ -543,12 +543,11 @@ unsigned long get_wchan(struct task_stru
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+ stack = (unsigned long)task_stack_page(p);
+- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
++ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-8-sizeof(u64))
+ return 0;
+ fp = *(u64 *)(p->thread.sp);
+ do {
+- if (fp < (unsigned long)stack ||
+- fp >= (unsigned long)stack+THREAD_SIZE)
++ if (fp < stack || fp > stack+THREAD_SIZE-8-sizeof(u64))
+ return 0;
+ ip = *(u64 *)(fp+8);
+ if (!in_sched_functions(ip))
+diff -urNp linux-2.6.31.7/arch/x86/kernel/process.c linux-2.6.31.7/arch/x86/kernel/process.c
+--- linux-2.6.31.7/arch/x86/kernel/process.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/process.c 2009-12-08 17:39:42.884658580 -0500
+@@ -76,7 +76,7 @@ void exit_thread(void)
+ unsigned long *bp = t->io_bitmap_ptr;
+
+ if (bp) {
+- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++ struct tss_struct *tss = init_tss + get_cpu();
+
+ t->io_bitmap_ptr = NULL;
+ clear_thread_flag(TIF_IO_BITMAP);
+@@ -108,6 +108,9 @@ void flush_thread(void)
+
+ clear_tsk_thread_flag(tsk, TIF_DEBUG);
+
++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR)
++ loadsegment(gs, 0);
++#endif
+ tsk->thread.debugreg0 = 0;
+ tsk->thread.debugreg1 = 0;
+ tsk->thread.debugreg2 = 0;
+@@ -611,17 +614,3 @@ static int __init idle_setup(char *str)
+ return 0;
+ }
+ early_param("idle", idle_setup);
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
+-
+diff -urNp linux-2.6.31.7/arch/x86/kernel/ptrace.c linux-2.6.31.7/arch/x86/kernel/ptrace.c
+--- linux-2.6.31.7/arch/x86/kernel/ptrace.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/ptrace.c 2009-12-08 17:39:42.900703064 -0500
+@@ -934,7 +934,7 @@ static const struct user_regset_view use
+ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+ {
+ int ret;
+- unsigned long __user *datap = (unsigned long __user *)data;
++ unsigned long __user *datap = (__force unsigned long __user *)data;
+
+ switch (request) {
+ /* read the word at location addr in the USER area. */
+@@ -1021,14 +1021,14 @@ long arch_ptrace(struct task_struct *chi
+ if (addr < 0)
+ return -EIO;
+ ret = do_get_thread_area(child, addr,
+- (struct user_desc __user *) data);
++ (__force struct user_desc __user *) data);
+ break;
+
+ case PTRACE_SET_THREAD_AREA:
+ if (addr < 0)
+ return -EIO;
+ ret = do_set_thread_area(child, addr,
+- (struct user_desc __user *) data, 0);
++ (__force struct user_desc __user *) data, 0);
+ break;
+ #endif
+
+@@ -1047,12 +1047,12 @@ long arch_ptrace(struct task_struct *chi
+ #ifdef CONFIG_X86_PTRACE_BTS
+ case PTRACE_BTS_CONFIG:
+ ret = ptrace_bts_config
+- (child, data, (struct ptrace_bts_config __user *)addr);
++ (child, data, (__force struct ptrace_bts_config __user *)addr);
+ break;
+
+ case PTRACE_BTS_STATUS:
+ ret = ptrace_bts_status
+- (child, data, (struct ptrace_bts_config __user *)addr);
++ (child, data, (__force struct ptrace_bts_config __user *)addr);
+ break;
+
+ case PTRACE_BTS_SIZE:
+@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
+
+ case PTRACE_BTS_GET:
+ ret = ptrace_bts_read_record
+- (child, data, (struct bts_struct __user *) addr);
++ (child, data, (__force struct bts_struct __user *) addr);
+ break;
+
+ case PTRACE_BTS_CLEAR:
+@@ -1070,7 +1070,7 @@ long arch_ptrace(struct task_struct *chi
+
+ case PTRACE_BTS_DRAIN:
+ ret = ptrace_bts_drain
+- (child, data, (struct bts_struct __user *) addr);
++ (child, data, (__force struct bts_struct __user *) addr);
+ break;
+ #endif /* CONFIG_X86_PTRACE_BTS */
+
+@@ -1454,7 +1454,7 @@ void send_sigtrap(struct task_struct *ts
+ info.si_code = si_code;
+
+ /* User-mode ip? */
+- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
++ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
+
+ /* Send us the fake SIGTRAP */
+ force_sig_info(SIGTRAP, &info, tsk);
+diff -urNp linux-2.6.31.7/arch/x86/kernel/reboot.c linux-2.6.31.7/arch/x86/kernel/reboot.c
+--- linux-2.6.31.7/arch/x86/kernel/reboot.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/reboot.c 2009-12-08 17:39:42.901664521 -0500
+@@ -31,7 +31,7 @@ void (*pm_power_off)(void);
+ EXPORT_SYMBOL(pm_power_off);
+
+ static const struct desc_ptr no_idt = {};
+-static int reboot_mode;
++static unsigned short reboot_mode;
+ enum reboot_type reboot_type = BOOT_KBD;
+ int reboot_force;
+
+@@ -257,7 +257,7 @@ static struct dmi_system_id __initdata r
+ DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"),
+ },
+ },
+- { }
++ { NULL, NULL, {{0, {0}}}, NULL}
+ };
+
+ static int __init reboot_init(void)
+@@ -273,12 +273,12 @@ core_initcall(reboot_init);
+ controller to pulse the CPU reset line, which is more thorough, but
+ doesn't work with at least one type of 486 motherboard. It is easy
+ to stop this code working; hence the copious comments. */
+-static const unsigned long long
+-real_mode_gdt_entries [3] =
++static struct desc_struct
++real_mode_gdt_entries [3] __read_only =
+ {
+- 0x0000000000000000ULL, /* Null descriptor */
+- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
+- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
++ {{{0x00000000, 0x00000000}}}, /* Null descriptor */
++ {{{0x0000ffff, 0x00009b00}}}, /* 16-bit real-mode 64k code at 0x00000000 */
++ {{{0x0100ffff, 0x00009300}}} /* 16-bit real-mode 64k data at 0x00000100 */
+ };
+
+ static const struct desc_ptr
+@@ -327,7 +327,7 @@ static const unsigned char jump_to_bios
+ * specified by the code and length parameters.
+ * We assume that length will aways be less that 100!
+ */
+-void machine_real_restart(const unsigned char *code, int length)
++void machine_real_restart(const unsigned char *code, unsigned int length)
+ {
+ local_irq_disable();
+
+@@ -347,8 +347,8 @@ void machine_real_restart(const unsigned
+ /* Remap the kernel at virtual address zero, as well as offset zero
+ from the kernel segment. This assumes the kernel segment starts at
+ virtual address PAGE_OFFSET. */
+- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
++ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+
+ /*
+ * Use `swapper_pg_dir' as our page directory.
+@@ -360,16 +360,15 @@ void machine_real_restart(const unsigned
+ boot)". This seems like a fairly standard thing that gets set by
+ REBOOT.COM programs, and the previous reset routine did this
+ too. */
+- *((unsigned short *)0x472) = reboot_mode;
++ *(unsigned short *)(__va(0x472)) = reboot_mode;
+
+ /* For the switch to real mode, copy some code to low memory. It has
+ to be in the first 64k because it is running in 16-bit mode, and it
+ has to have the same physical and virtual address, because it turns
+ off paging. Copy it near the end of the first page, out of the way
+ of BIOS variables. */
+- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
+- real_mode_switch, sizeof (real_mode_switch));
+- memcpy((void *)(0x1000 - 100), code, length);
++ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
++ memcpy(__va(0x1000 - 100), code, length);
+
+ /* Set up the IDT for real mode. */
+ load_idt(&real_mode_idt);
+diff -urNp linux-2.6.31.7/arch/x86/kernel/setup.c linux-2.6.31.7/arch/x86/kernel/setup.c
+--- linux-2.6.31.7/arch/x86/kernel/setup.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/setup.c 2009-12-08 17:39:42.902663076 -0500
+@@ -768,14 +768,14 @@ void __init setup_arch(char **cmdline_p)
+
+ if (!boot_params.hdr.root_flags)
+ root_mountflags &= ~MS_RDONLY;
+- init_mm.start_code = (unsigned long) _text;
+- init_mm.end_code = (unsigned long) _etext;
++ init_mm.start_code = ktla_ktva((unsigned long) _text);
++ init_mm.end_code = ktla_ktva((unsigned long) _etext);
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = _brk_end;
+
+- code_resource.start = virt_to_phys(_text);
+- code_resource.end = virt_to_phys(_etext)-1;
+- data_resource.start = virt_to_phys(_etext);
++ code_resource.start = virt_to_phys(ktla_ktva(_text));
++ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
++ data_resource.start = virt_to_phys(_sdata);
+ data_resource.end = virt_to_phys(_edata)-1;
+ bss_resource.start = virt_to_phys(&__bss_start);
+ bss_resource.end = virt_to_phys(&__bss_stop)-1;
+diff -urNp linux-2.6.31.7/arch/x86/kernel/setup_percpu.c linux-2.6.31.7/arch/x86/kernel/setup_percpu.c
+--- linux-2.6.31.7/arch/x86/kernel/setup_percpu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/setup_percpu.c 2009-12-08 17:39:42.902663076 -0500
+@@ -25,19 +25,17 @@
+ # define DBG(x...)
+ #endif
+
++#ifdef CONFIG_SMP
+ DEFINE_PER_CPU(int, cpu_number);
+ EXPORT_PER_CPU_SYMBOL(cpu_number);
++#endif
+
+-#ifdef CONFIG_X86_64
+ #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
+-#else
+-#define BOOT_PERCPU_OFFSET 0
+-#endif
+
+ DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+ EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
++unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
+ [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+ };
+ EXPORT_SYMBOL(__per_cpu_offset);
+@@ -429,13 +427,15 @@ early_param("percpu_alloc", percpu_alloc
+ static inline void setup_percpu_segment(int cpu)
+ {
+ #ifdef CONFIG_X86_32
+- struct desc_struct gdt;
+-
+- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+- 0x2 | DESCTYPE_S, 0x8);
+- gdt.s = 1;
+- write_gdt_entry(get_cpu_gdt_table(cpu),
+- GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
++ struct desc_struct d, *gdt = get_cpu_gdt_table(cpu);
++ unsigned long base = per_cpu_offset(cpu);
++ const unsigned long limit = VMALLOC_END - base - 1;
++
++ if (limit < 64*1024)
++ pack_descriptor(&d, base, limit, 0x80 | DESCTYPE_S | 0x3, 0x4);
++ else
++ pack_descriptor(&d, base, limit >> PAGE_SHIFT, 0x80 | DESCTYPE_S | 0x3, 0xC);
++ write_gdt_entry(gdt, GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
+ #endif
+ }
+
+@@ -486,6 +486,11 @@ void __init setup_per_cpu_areas(void)
+ /* alrighty, percpu areas up and running */
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu) {
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_x86_32
++ unsigned long canary = per_cpu(stack_canary, cpu);
++#endif
++#endif
+ per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+ per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+ per_cpu(cpu_number, cpu) = cpu;
+@@ -513,6 +518,12 @@ void __init setup_per_cpu_areas(void)
+ early_per_cpu_map(x86_cpu_to_node_map, cpu);
+ #endif
+ #endif
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_x86_32
++ if (cpu == boot_cpu_id)
++ per_cpu(stack_canary, cpu) = canary;
++#endif
++#endif
+ /*
+ * Up to this point, the boot CPU has been using .data.init
+ * area. Reload any changed state for the boot CPU.
+diff -urNp linux-2.6.31.7/arch/x86/kernel/signal.c linux-2.6.31.7/arch/x86/kernel/signal.c
+--- linux-2.6.31.7/arch/x86/kernel/signal.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/signal.c 2009-12-08 17:39:42.903664175 -0500
+@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
+ * Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0.
+ */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ #else /* !CONFIG_X86_32 */
+ sp = round_down(sp, 16) - 8;
+ #endif
+@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (onsigstack && !likely(on_sig_stack(sp)))
+- return (void __user *)-1L;
++ return (__force void __user *)-1L;
+
+ /* save i387 state */
+ if (used_math() && save_i387_xstate(*fpstate) < 0)
+- return (void __user *)-1L;
++ return (__force void __user *)-1L;
+
+ return (void __user *)sp;
+ }
+@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
+ }
+
+ if (current->mm->context.vdso)
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+ else
+- restorer = &frame->retcode;
++ restorer = (void __user *)&frame->retcode;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+
+@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
+ * reasons and because gdb uses it as a signature to notice
+ * signal handler stack frames.
+ */
+- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
++ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
+
+ if (err)
+ return -EFAULT;
+@@ -377,7 +377,7 @@ static int __setup_rt_frame(int sig, str
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. */
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+ put_user_ex(restorer, &frame->pretcode);
+@@ -389,7 +389,7 @@ static int __setup_rt_frame(int sig, str
+ * reasons and because gdb uses it as a signature to notice
+ * signal handler stack frames.
+ */
+- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
++ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -789,7 +789,7 @@ static void do_signal(struct pt_regs *re
+ * X86_32: vm86 regs switched out by assembly code before reaching
+ * here, so testing against kernel CS suffices.
+ */
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ return;
+
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK)
+diff -urNp linux-2.6.31.7/arch/x86/kernel/smpboot.c linux-2.6.31.7/arch/x86/kernel/smpboot.c
+--- linux-2.6.31.7/arch/x86/kernel/smpboot.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/smpboot.c 2009-12-08 17:39:42.903664175 -0500
+@@ -727,7 +727,11 @@ do_rest:
+ (unsigned long)task_stack_page(c_idle.idle) -
+ KERNEL_STACK_OFFSET + THREAD_SIZE;
+ #endif
++
++ pax_open_kernel();
+ early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
++ pax_close_kernel();
++
+ initial_code = (unsigned long)start_secondary;
+ stack_start.sp = (void *) c_idle.idle->thread.sp;
+
+diff -urNp linux-2.6.31.7/arch/x86/kernel/step.c linux-2.6.31.7/arch/x86/kernel/step.c
+--- linux-2.6.31.7/arch/x86/kernel/step.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/step.c 2009-12-08 17:39:42.904662271 -0500
+@@ -23,22 +23,20 @@ unsigned long convert_ip_to_linear(struc
+ * and APM bios ones we just ignore here.
+ */
+ if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+- u32 *desc;
++ struct desc_struct *desc;
+ unsigned long base;
+
+- seg &= ~7UL;
++ seg >>= 3;
+
+ mutex_lock(&child->mm->context.lock);
+- if (unlikely((seg >> 3) >= child->mm->context.size))
+- addr = -1L; /* bogus selector, access would fault */
++ if (unlikely(seg >= child->mm->context.size))
++ addr = -EINVAL;
+ else {
+- desc = child->mm->context.ldt + seg;
+- base = ((desc[0] >> 16) |
+- ((desc[1] & 0xff) << 16) |
+- (desc[1] & 0xff000000));
++ desc = &child->mm->context.ldt[seg];
++ base = (desc->a >> 16) | ((desc->b & 0xff) << 16) | (desc->b & 0xff000000);
+
+ /* 16-bit code segment? */
+- if (!((desc[1] >> 22) & 1))
++ if (!((desc->b >> 22) & 1))
+ addr &= 0xffff;
+ addr += base;
+ }
+@@ -54,6 +52,9 @@ static int is_setting_trap_flag(struct t
+ unsigned char opcode[15];
+ unsigned long addr = convert_ip_to_linear(child, regs);
+
++ if (addr == -EINVAL)
++ return 0;
++
+ copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+ for (i = 0; i < copied; i++) {
+ switch (opcode[i]) {
+@@ -75,7 +76,7 @@ static int is_setting_trap_flag(struct t
+
+ #ifdef CONFIG_X86_64
+ case 0x40 ... 0x4f:
+- if (regs->cs != __USER_CS)
++ if ((regs->cs & 0xffff) != __USER_CS)
+ /* 32-bit mode: register increment */
+ return 0;
+ /* 64-bit mode: REX prefix */
+diff -urNp linux-2.6.31.7/arch/x86/kernel/syscall_table_32.S linux-2.6.31.7/arch/x86/kernel/syscall_table_32.S
+--- linux-2.6.31.7/arch/x86/kernel/syscall_table_32.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/syscall_table_32.S 2009-12-08 17:39:42.905663462 -0500
+@@ -1,3 +1,4 @@
++.section .rodata,"a",@progbits
+ ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
+ .long sys_exit
+diff -urNp linux-2.6.31.7/arch/x86/kernel/sys_i386_32.c linux-2.6.31.7/arch/x86/kernel/sys_i386_32.c
+--- linux-2.6.31.7/arch/x86/kernel/sys_i386_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/sys_i386_32.c 2009-12-08 17:39:42.904662271 -0500
+@@ -24,6 +24,21 @@
+
+ #include <asm/syscalls.h>
+
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
++{
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size || addr > pax_task_size - len)
++ return -EINVAL;
++
++ return 0;
++}
++
+ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+@@ -83,6 +98,205 @@ out:
+ return err;
+ }
+
++unsigned long
++arch_get_unmapped_area(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff, unsigned long flags)
++{
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ unsigned long start_addr, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ vma = find_vma(mm, addr);
++ if (pax_task_size - len >= addr &&
++ (!vma || addr + len <= vma->vm_start))
++ return addr;
++ }
++ if (len > mm->cached_hole_size) {
++ start_addr = addr = mm->free_area_cache;
++ } else {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ }
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
++ start_addr = 0x00110000UL;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ start_addr += mm->delta_mmap & 0x03FFF000UL;
++#endif
++
++ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
++ start_addr = addr = mm->mmap_base;
++ else
++ addr = start_addr;
++ }
++#endif
++
++full_search:
++ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
++ /* At this point: (!vma || addr < vma->vm_end). */
++ if (pax_task_size - len < addr) {
++ /*
++ * Start a new search - just in case we missed
++ * some holes.
++ */
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ goto full_search;
++ }
++ return -ENOMEM;
++ }
++ if (!vma || addr + len <= vma->vm_start) {
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
++ }
++ if (addr + mm->cached_hole_size < vma->vm_start)
++ mm->cached_hole_size = vma->vm_start - addr;
++ addr = vma->vm_end;
++ if (mm->start_brk <= addr && addr < mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ goto full_search;
++ }
++ }
++}
++
++unsigned long
++arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
++ const unsigned long len, const unsigned long pgoff,
++ const unsigned long flags)
++{
++ struct vm_area_struct *vma;
++ struct mm_struct *mm = current->mm;
++ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ /* requested length too big for entire address space */
++ if (len > pax_task_size)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
++ goto bottomup;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++ /* requesting a specific address */
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ vma = find_vma(mm, addr);
++ if (pax_task_size - len >= addr &&
++ (!vma || addr + len <= vma->vm_start))
++ return addr;
++ }
++
++ /* check if free_area_cache is useful for us */
++ if (len <= mm->cached_hole_size) {
++ mm->cached_hole_size = 0;
++ mm->free_area_cache = mm->mmap_base;
++ }
++
++ /* either no address requested or can't fit in requested address hole */
++ addr = mm->free_area_cache;
++
++ /* make sure it can fit in the remaining address space */
++ if (addr > len) {
++ vma = find_vma(mm, addr-len);
++ if (!vma || addr <= vma->vm_start)
++ /* remember the address as a hint for next time */
++ return (mm->free_area_cache = addr-len);
++ }
++
++ if (mm->mmap_base < len)
++ goto bottomup;
++
++ addr = mm->mmap_base-len;
++
++ do {
++ /*
++ * Lookup failure means no vma is above this address,
++ * else if new region fits below vma->vm_start,
++ * return with success:
++ */
++ vma = find_vma(mm, addr);
++ if (!vma || addr+len <= vma->vm_start)
++ /* remember the address as a hint for next time */
++ return (mm->free_area_cache = addr);
++
++ /* remember the largest hole we saw so far */
++ if (addr + mm->cached_hole_size < vma->vm_start)
++ mm->cached_hole_size = vma->vm_start - addr;
++
++ /* try just below the current vma->vm_start */
++ addr = vma->vm_start-len;
++ } while (len < vma->vm_start);
++
++bottomup:
++ /*
++ * A failed mmap() very likely causes application failure,
++ * so fall back to the bottom-up function here. This scenario
++ * can happen with large stack limits and large mmap()
++ * allocations.
++ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
++ mm->cached_hole_size = ~0UL;
++ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
++ /*
++ * Restore the topdown base:
++ */
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
++ mm->cached_hole_size = ~0UL;
++
++ return addr;
++}
+
+ struct sel_arg_struct {
+ unsigned long n;
+@@ -118,7 +332,7 @@ asmlinkage int sys_ipc(uint call, int fi
+ return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
+ case SEMTIMEDOP:
+ return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
+- (const struct timespec __user *)fifth);
++ (__force const struct timespec __user *)fifth);
+
+ case SEMGET:
+ return sys_semget(first, second, third);
+@@ -165,7 +379,7 @@ asmlinkage int sys_ipc(uint call, int fi
+ ret = do_shmat(first, (char __user *) ptr, second, &raddr);
+ if (ret)
+ return ret;
+- return put_user(raddr, (ulong __user *) third);
++ return put_user(raddr, (__force ulong __user *) third);
+ }
+ case 1: /* iBCS2 emulator entry point */
+ if (!segment_eq(get_fs(), get_ds()))
+diff -urNp linux-2.6.31.7/arch/x86/kernel/sys_x86_64.c linux-2.6.31.7/arch/x86/kernel/sys_x86_64.c
+--- linux-2.6.31.7/arch/x86/kernel/sys_x86_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/sys_x86_64.c 2009-12-08 17:39:42.905663462 -0500
+@@ -47,8 +47,8 @@ out:
+ return error;
+ }
+
+-static void find_start_end(unsigned long flags, unsigned long *begin,
+- unsigned long *end)
++static void find_start_end(struct mm_struct *mm, unsigned long flags,
++ unsigned long *begin, unsigned long *end)
+ {
+ if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
+ unsigned long new_begin;
+@@ -67,7 +67,7 @@ static void find_start_end(unsigned long
+ *begin = new_begin;
+ }
+ } else {
+- *begin = TASK_UNMAPPED_BASE;
++ *begin = mm->mmap_base;
+ *end = TASK_SIZE;
+ }
+ }
+@@ -84,11 +84,15 @@ arch_get_unmapped_area(struct file *filp
+ if (flags & MAP_FIXED)
+ return addr;
+
+- find_start_end(flags, &begin, &end);
++ find_start_end(mm, flags, &begin, &end);
+
+ if (len > end)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+@@ -143,7 +147,7 @@ arch_get_unmapped_area_topdown(struct fi
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+- unsigned long addr = addr0;
++ unsigned long base = mm->mmap_base, addr = addr0;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+@@ -156,6 +160,10 @@ arch_get_unmapped_area_topdown(struct fi
+ if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
+ goto bottomup;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+@@ -213,13 +221,21 @@ bottomup:
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+- mm->free_area_cache = mm->mmap_base;
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
+diff -urNp linux-2.6.31.7/arch/x86/kernel/time_32.c linux-2.6.31.7/arch/x86/kernel/time_32.c
+--- linux-2.6.31.7/arch/x86/kernel/time_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/time_32.c 2009-12-08 17:39:42.906659653 -0500
+@@ -47,22 +47,32 @@ unsigned long profile_pc(struct pt_regs
+ unsigned long pc = instruction_pointer(regs);
+
+ #ifdef CONFIG_SMP
+- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ if (!user_mode(regs) && in_lock_functions(pc)) {
+ #ifdef CONFIG_FRAME_POINTER
+- return *(unsigned long *)(regs->bp + sizeof(long));
++ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
+ #else
+ unsigned long *sp = (unsigned long *)&regs->sp;
+
+ /* Return address is either directly at stack pointer
+ or above a saved flags. Eflags has bits 22-31 zero,
+ kernel addresses don't. */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return ktla_ktva(sp[0]);
++#else
+ if (sp[0] >> 22)
+ return sp[0];
+ if (sp[1] >> 22)
+ return sp[1];
+ #endif
++
++#endif
+ }
+ #endif
++
++ if (!user_mode(regs))
++ pc = ktla_ktva(pc);
++
+ return pc;
+ }
+ EXPORT_SYMBOL(profile_pc);
+diff -urNp linux-2.6.31.7/arch/x86/kernel/time_64.c linux-2.6.31.7/arch/x86/kernel/time_64.c
+--- linux-2.6.31.7/arch/x86/kernel/time_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/time_64.c 2009-12-08 17:39:42.906659653 -0500
+@@ -25,8 +25,6 @@
+ #include <asm/time.h>
+ #include <asm/timer.h>
+
+-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+-
+ unsigned long profile_pc(struct pt_regs *regs)
+ {
+ unsigned long pc = instruction_pointer(regs);
+@@ -34,7 +32,7 @@ unsigned long profile_pc(struct pt_regs
+ /* Assume the lock function has either no stack frame or a copy
+ of flags from PUSHF
+ Eflags always has bits 22 and up cleared unlike kernel addresses. */
+- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ if (!user_mode(regs) && in_lock_functions(pc)) {
+ #ifdef CONFIG_FRAME_POINTER
+ return *(unsigned long *)(regs->bp + sizeof(long));
+ #else
+diff -urNp linux-2.6.31.7/arch/x86/kernel/tls.c linux-2.6.31.7/arch/x86/kernel/tls.c
+--- linux-2.6.31.7/arch/x86/kernel/tls.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/tls.c 2009-12-08 17:39:42.906659653 -0500
+@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
++ return -EINVAL;
++#endif
++
+ set_tls_desc(p, idx, &info, 1);
+
+ return 0;
+diff -urNp linux-2.6.31.7/arch/x86/kernel/trampoline_32.S linux-2.6.31.7/arch/x86/kernel/trampoline_32.S
+--- linux-2.6.31.7/arch/x86/kernel/trampoline_32.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/trampoline_32.S 2009-12-08 17:39:42.907663038 -0500
+@@ -31,6 +31,12 @@
+ #include <asm/segment.h>
+ #include <asm/page_types.h>
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) ((X) - __PAGE_OFFSET)
++#endif
++
+ /* We can free up trampoline after bootup if cpu hotplug is not supported. */
+ #ifndef CONFIG_HOTPLUG_CPU
+ .section ".cpuinit.data","aw",@progbits
+@@ -64,7 +70,7 @@ r_base = .
+ inc %ax # protected mode (PE) bit
+ lmsw %ax # into protected mode
+ # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
+- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
++ ljmpl $__BOOT_CS, $ta(startup_32_smp)
+
+ # These need to be in the same 64K segment as the above;
+ # hence we don't use the boot_gdt_descr defined in head.S
+diff -urNp linux-2.6.31.7/arch/x86/kernel/traps.c linux-2.6.31.7/arch/x86/kernel/traps.c
+--- linux-2.6.31.7/arch/x86/kernel/traps.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/traps.c 2009-12-08 17:39:42.908663659 -0500
+@@ -70,14 +70,6 @@ asmlinkage int system_call(void);
+
+ /* Do we ignore FPU interrupts ? */
+ char ignore_fpu_irq;
+-
+-/*
+- * The IDT has to be page-aligned to simplify the Pentium
+- * F0 0F bug workaround.. We have a special link segment
+- * for this.
+- */
+-gate_desc idt_table[256]
+- __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
+ #endif
+
+ DECLARE_BITMAP(used_vectors, NR_VECTORS);
+@@ -115,7 +107,7 @@ static inline void preempt_conditional_c
+ static inline void
+ die_if_kernel(const char *str, struct pt_regs *regs, long err)
+ {
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ die(str, regs, err);
+ }
+ #endif
+@@ -127,7 +119,7 @@ do_trap(int trapnr, int signr, char *str
+ struct task_struct *tsk = current;
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK) {
++ if (v8086_mode(regs)) {
+ /*
+ * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
+ * On nmi (interrupt 2), do_trap should not be called.
+@@ -138,7 +130,7 @@ do_trap(int trapnr, int signr, char *str
+ }
+ #endif
+
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto kernel_trap;
+
+ #ifdef CONFIG_X86_32
+@@ -161,7 +153,7 @@ trap_signal:
+ printk_ratelimit()) {
+ printk(KERN_INFO
+ "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
+- tsk->comm, tsk->pid, str,
++ tsk->comm, task_pid_nr(tsk), str,
+ regs->ip, regs->sp, error_code);
+ print_vma_addr(" in ", regs->ip);
+ printk("\n");
+@@ -180,6 +172,12 @@ kernel_trap:
+ tsk->thread.trap_no = trapnr;
+ die(str, regs, error_code);
+ }
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (trapnr == 4)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ return;
+
+ #ifdef CONFIG_X86_32
+@@ -268,14 +266,30 @@ do_general_protection(struct pt_regs *re
+ conditional_sti(regs);
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK)
++ if (v8086_mode(regs))
+ goto gp_in_vm86;
+ #endif
+
+ tsk = current;
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto gp_in_kernel;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
++ struct mm_struct *mm = tsk->mm;
++ unsigned long limit;
++
++ down_write(&mm->mmap_sem);
++ limit = mm->context.user_cs_limit;
++ if (limit < TASK_SIZE) {
++ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
++ up_write(&mm->mmap_sem);
++ return;
++ }
++ up_write(&mm->mmap_sem);
++ }
++#endif
++
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 13;
+
+@@ -308,6 +322,13 @@ gp_in_kernel:
+ if (notify_die(DIE_GPF, "general protection fault", regs,
+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
+ return;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if ((regs->cs & 0xFFFF) == __KERNEL_CS)
++ die("PAX: suspicious general protection fault", regs, error_code);
++ else
++#endif
++
+ die("general protection fault", regs, error_code);
+ }
+
+@@ -561,7 +582,7 @@ dotraplinkage void __kprobes do_debug(st
+ }
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK)
++ if (v8086_mode(regs))
+ goto debug_vm86;
+ #endif
+
+@@ -573,7 +594,7 @@ dotraplinkage void __kprobes do_debug(st
+ * kernel space (but re-enable TF when returning to user mode).
+ */
+ if (condition & DR_STEP) {
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto clear_TF_reenable;
+ }
+
+@@ -760,7 +781,7 @@ do_simd_coprocessor_error(struct pt_regs
+ * Handle strange cache flush from user space exception
+ * in all other cases. This is undocumented behaviour.
+ */
+- if (regs->flags & X86_VM_MASK) {
++ if (v8086_mode(regs)) {
+ handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
+ return;
+ }
+@@ -789,19 +810,14 @@ do_spurious_interrupt_bug(struct pt_regs
+ #ifdef CONFIG_X86_32
+ unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
+ {
+- struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
+ unsigned long base = (kesp - uesp) & -THREAD_SIZE;
+ unsigned long new_kesp = kesp - base;
+ unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
+- __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
++ struct desc_struct ss;
+
+ /* Set up base for espfix segment */
+- desc &= 0x00f0ff0000000000ULL;
+- desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
+- ((((__u64)base) << 32) & 0xff00000000000000ULL) |
+- ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
+- (lim_pages & 0xffff);
+- *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
++ pack_descriptor(&ss, base, lim_pages, 0x93, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(smp_processor_id()), GDT_ENTRY_ESPFIX_SS, &ss, DESCTYPE_S);
+
+ return new_kesp;
+ }
+diff -urNp linux-2.6.31.7/arch/x86/kernel/tsc.c linux-2.6.31.7/arch/x86/kernel/tsc.c
+--- linux-2.6.31.7/arch/x86/kernel/tsc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/tsc.c 2009-12-08 17:39:42.908663659 -0500
+@@ -790,7 +790,7 @@ static struct dmi_system_id __initdata b
+ DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
+ },
+ },
+- {}
++ { NULL, NULL, {{0, {0}}}, NULL}
+ };
+
+ static void __init check_system_tsc_reliable(void)
+diff -urNp linux-2.6.31.7/arch/x86/kernel/vm86_32.c linux-2.6.31.7/arch/x86/kernel/vm86_32.c
+--- linux-2.6.31.7/arch/x86/kernel/vm86_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/vm86_32.c 2009-12-08 17:39:42.909664436 -0500
+@@ -41,6 +41,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/audit.h>
+ #include <linux/stddef.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
+ do_exit(SIGSEGV);
+ }
+
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+ current->thread.sp0 = current->thread.saved_sp0;
+ current->thread.sysenter_cs = __KERNEL_CS;
+ load_sp0(tss, &current->thread);
+@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
+ struct task_struct *tsk;
+ int tmp, ret = -EPERM;
+
++#ifdef CONFIG_GRKERNSEC_VM86
++ if (!capable(CAP_SYS_RAWIO)) {
++ gr_handle_vm86();
++ goto out;
++ }
++#endif
++
+ tsk = current;
+ if (tsk->thread.saved_sp0)
+ goto out;
+@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
+ int tmp, ret;
+ struct vm86plus_struct __user *v86;
+
++#ifdef CONFIG_GRKERNSEC_VM86
++ if (!capable(CAP_SYS_RAWIO)) {
++ gr_handle_vm86();
++ ret = -EPERM;
++ goto out;
++ }
++#endif
++
+ tsk = current;
+ switch (regs->bx) {
+ case VM86_REQUEST_IRQ:
+@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
+ tsk->thread.saved_fs = info->regs32->fs;
+ tsk->thread.saved_gs = get_user_gs(info->regs32);
+
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+ tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ if (cpu_has_sep)
+ tsk->thread.sysenter_cs = 0;
+@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
+ goto cannot_handle;
+ if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
+ goto cannot_handle;
+- intr_ptr = (unsigned long __user *) (i << 2);
++ intr_ptr = (__force unsigned long __user *) (i << 2);
+ if (get_user(segoffs, intr_ptr))
+ goto cannot_handle;
+ if ((segoffs >> 16) == BIOSSEG)
+diff -urNp linux-2.6.31.7/arch/x86/kernel/vmi_32.c linux-2.6.31.7/arch/x86/kernel/vmi_32.c
+--- linux-2.6.31.7/arch/x86/kernel/vmi_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/vmi_32.c 2009-12-08 17:39:42.909664436 -0500
+@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
+ typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
+
+ #define call_vrom_func(rom,func) \
+- (((VROMFUNC *)(rom->func))())
++ (((VROMFUNC *)(ktva_ktla(rom.func)))())
+
+ #define call_vrom_long_func(rom,func,arg) \
+- (((VROMLONGFUNC *)(rom->func)) (arg))
++({\
++ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
++ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
++ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
++ __reloc;\
++})
+
+-static struct vrom_header *vmi_rom;
++static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
+ static int disable_pge;
+ static int disable_pse;
+ static int disable_sep;
+@@ -76,10 +81,10 @@ static struct {
+ void (*set_initial_ap_state)(int, int);
+ void (*halt)(void);
+ void (*set_lazy_mode)(int mode);
+-} vmi_ops;
++} vmi_ops __read_only;
+
+ /* Cached VMI operations */
+-struct vmi_timer_ops vmi_timer_ops;
++struct vmi_timer_ops vmi_timer_ops __read_only;
+
+ /*
+ * VMI patching routines.
+@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
+ static inline void patch_offset(void *insnbuf,
+ unsigned long ip, unsigned long dest)
+ {
+- *(unsigned long *)(insnbuf+1) = dest-ip-5;
++ *(unsigned long *)(insnbuf+1) = dest-ip-5;
+ }
+
+ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
+@@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
+ {
+ u64 reloc;
+ struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
++
+ reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
+ switch(rel->type) {
+ case VMI_RELOCATION_CALL_REL:
+@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
+
+ static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+- const pte_t pte = { .pte = 0 };
++ const pte_t pte = __pte(0ULL);
+ vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
+ }
+
+ static void vmi_pmd_clear(pmd_t *pmd)
+ {
+- const pte_t pte = { .pte = 0 };
++ const pte_t pte = __pte(0ULL);
+ vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
+ }
+ #endif
+@@ -438,8 +444,8 @@ vmi_startup_ipi_hook(int phys_apicid, un
+ ap.ss = __KERNEL_DS;
+ ap.esp = (unsigned long) start_esp;
+
+- ap.ds = __USER_DS;
+- ap.es = __USER_DS;
++ ap.ds = __KERNEL_DS;
++ ap.es = __KERNEL_DS;
+ ap.fs = __KERNEL_PERCPU;
+ ap.gs = __KERNEL_STACK_CANARY;
+
+@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
+ paravirt_leave_lazy_mmu();
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++static unsigned long vmi_pax_open_kernel(void)
++{
++ return 0;
++}
++
++static unsigned long vmi_pax_close_kernel(void)
++{
++ return 0;
++}
++#endif
++
+ static inline int __init check_vmi_rom(struct vrom_header *rom)
+ {
+ struct pci_header *pci;
+@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
+ return 0;
+ if (rom->vrom_signature != VMI_SIGNATURE)
+ return 0;
++ if (rom->rom_length * 512 > sizeof(*rom)) {
++ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
++ return 0;
++ }
+ if (rom->api_version_maj != VMI_API_REV_MAJOR ||
+ rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
+ printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
+@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
+ struct vrom_header *romstart;
+ romstart = (struct vrom_header *)isa_bus_to_virt(base);
+ if (check_vmi_rom(romstart)) {
+- vmi_rom = romstart;
++ vmi_rom = *romstart;
+ return 1;
+ }
+ }
+@@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
+
+ para_fill(pv_irq_ops.safe_halt, Halt);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
++ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
++#endif
++
+ /*
+ * Alternative instruction rewriting doesn't happen soon enough
+ * to convert VMI_IRET to a call instead of a jump; so we have
+@@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
+
+ void __init vmi_init(void)
+ {
+- if (!vmi_rom)
++ if (!vmi_rom.rom_signature)
+ probe_vmi_rom();
+ else
+- check_vmi_rom(vmi_rom);
++ check_vmi_rom(&vmi_rom);
+
+ /* In case probing for or validating the ROM failed, basil */
+- if (!vmi_rom)
++ if (!vmi_rom.rom_signature)
+ return;
+
+- reserve_top_address(-vmi_rom->virtual_top);
++ reserve_top_address(-vmi_rom.virtual_top);
+
+ #ifdef CONFIG_X86_IO_APIC
+ /* This is virtual hardware; timer routing is wired correctly */
+@@ -874,7 +901,7 @@ void __init vmi_activate(void)
+ {
+ unsigned long flags;
+
+- if (!vmi_rom)
++ if (!vmi_rom.rom_signature)
+ return;
+
+ local_irq_save(flags);
+diff -urNp linux-2.6.31.7/arch/x86/kernel/vmlinux.lds.S linux-2.6.31.7/arch/x86/kernel/vmlinux.lds.S
+--- linux-2.6.31.7/arch/x86/kernel/vmlinux.lds.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/vmlinux.lds.S 2009-12-08 17:39:42.910664932 -0500
+@@ -26,6 +26,22 @@
+ #include <asm/page_types.h>
+ #include <asm/cache.h>
+ #include <asm/boot.h>
++#include <asm/segment.h>
++
++#undef PMD_SIZE
++#undef PMD_SHIFT
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
++#define PMD_SHIFT 21
++#else
++#define PMD_SHIFT 22
++#endif
++#define PMD_SIZE (1 << PMD_SHIFT)
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
++#else
++#define __KERNEL_TEXT_OFFSET 0
++#endif
+
+ #undef i386 /* in case the preprocessor is a 32bit one */
+
+@@ -34,46 +50,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
+ #ifdef CONFIG_X86_32
+ OUTPUT_ARCH(i386)
+ ENTRY(phys_startup_32)
+-jiffies = jiffies_64;
+ #else
+ OUTPUT_ARCH(i386:x86-64)
+ ENTRY(phys_startup_64)
+-jiffies_64 = jiffies;
+ #endif
+
++jiffies = jiffies_64;
++
+ PHDRS {
+ text PT_LOAD FLAGS(5); /* R_E */
+- data PT_LOAD FLAGS(7); /* RWE */
++ rodata PT_LOAD FLAGS(4); /* R__ */
++ module PT_LOAD FLAGS(5); /* R_E */
++ data PT_LOAD FLAGS(6); /* RW_ */
+ #ifdef CONFIG_X86_64
+- user PT_LOAD FLAGS(7); /* RWE */
++ user PT_LOAD FLAGS(5); /* R_E */
++#endif
++ init.begin PT_LOAD FLAGS(6); /* RW_ */
+ #ifdef CONFIG_SMP
+- percpu PT_LOAD FLAGS(7); /* RWE */
++ percpu PT_LOAD FLAGS(6); /* RW_ */
+ #endif
++ text.init PT_LOAD FLAGS(5); /* R_E */
++ text.exit PT_LOAD FLAGS(5); /* R_E */
+ init PT_LOAD FLAGS(7); /* RWE */
+-#endif
+ note PT_NOTE FLAGS(0); /* ___ */
+ }
+
+ SECTIONS
+ {
+ #ifdef CONFIG_X86_32
+- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
+- phys_startup_32 = startup_32 - LOAD_OFFSET;
++ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
+ #else
+- . = __START_KERNEL;
+- phys_startup_64 = startup_64 - LOAD_OFFSET;
++ . = __START_KERNEL;
+ #endif
+
+ /* Text and read-only data */
+
+- /* bootstrapping code */
+- .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
++ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
++ /* bootstrapping code */
++#ifdef CONFIG_X86_32
++ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#else
++ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#endif
++ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
+ _text = .;
+ *(.text.head)
+- } :text = 0x9090
+
+- /* The rest of the text */
+- .text : AT(ADDR(.text) - LOAD_OFFSET) {
++ /* The rest of the text */
+ #ifdef CONFIG_X86_32
+ /* not really needed, already page aligned */
+ . = ALIGN(PAGE_SIZE);
+@@ -88,11 +111,12 @@ SECTIONS
+ IRQENTRY_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+- /* End of text section */
+- _etext = .;
+ } :text = 0x9090
+
+- NOTES :text :note
++ . += __KERNEL_TEXT_OFFSET;
++
++ . = ALIGN(PAGE_SIZE);
++ NOTES :rodata :note
+
+ /* Exception table */
+ . = ALIGN(16);
+@@ -100,22 +124,57 @@ SECTIONS
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+- } :text = 0x9090
++ } :rodata
+
+ RO_DATA(PAGE_SIZE)
+
++#ifdef CONFIG_X86_32
++ . = ALIGN(PAGE_SIZE);
++ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
++ *(.idt)
++ . = ALIGN(PAGE_SIZE);
++ *(.empty_zero_page)
++ *(.swapper_pg_pmd)
++ *(.swapper_pg_dir)
++ }
++#endif
++
++ . = ALIGN(PAGE_SIZE);
++ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
++ *(.vmi.rom)
++ } :module
++
++ . = ALIGN(PAGE_SIZE);
++ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
++ MODULES_EXEC_VADDR = .;
++ BYTE(0)
++ . += (8 * 1024 * 1024);
++ . = ALIGN(PMD_SIZE);
++ MODULES_EXEC_END = . - 1;
++#endif
++
++ /* End of text section */
++ _etext = . - __KERNEL_TEXT_OFFSET;
++ } :module
++
+ /* Data */
+ .data : AT(ADDR(.data) - LOAD_OFFSET) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(PMD_SIZE);
++#else
++ . = ALIGN(PAGE_SIZE);
++#endif
++
+ /* Start of data section */
+ _sdata = .;
+
+ /* init_task */
+ INIT_TASK_DATA(THREAD_SIZE)
+
+-#ifdef CONFIG_X86_32
+- /* 32 bit has nosave before _edata */
+ NOSAVE_DATA
+-#endif
+
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
+ *(.data.idt)
+@@ -182,12 +241,6 @@ SECTIONS
+ }
+ vgetcpu_mode = VVIRT(.vgetcpu_mode);
+
+- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
+- .jiffies : AT(VLOAD(.jiffies)) {
+- *(.jiffies)
+- }
+- jiffies = VVIRT(.jiffies);
+-
+ .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
+ *(.vsyscall_3)
+ }
+@@ -205,12 +258,19 @@ SECTIONS
+ #endif /* CONFIG_X86_64 */
+
+ /* Init code and data - will be freed after init */
+- . = ALIGN(PAGE_SIZE);
+ .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
++ BYTE(0)
++
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(PMD_SIZE);
++#else
++ . = ALIGN(PAGE_SIZE);
++#endif
++
+ __init_begin = .; /* paired with __init_end */
+- }
++ } :init.begin
+
+-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
++#ifdef CONFIG_SMP
+ /*
+ * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
+ * output PHDR, so the next output section - .init.text - should
+@@ -219,18 +279,26 @@ SECTIONS
+ PERCPU_VADDR(0, :percpu)
+ #endif
+
+- .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
++ init_begin = .;
++ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
+ _sinittext = .;
+ INIT_TEXT
+ _einittext = .;
+- }
+-#ifdef CONFIG_X86_64
+- :init
+-#endif
++ } :text.init
++
++ /*
++ * .exit.text is discard at runtime, not link time, to deal with
++ * references from .altinstructions and .eh_frame
++ */
++ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
++ EXIT_TEXT
++ . = ALIGN(16);
++ } :text.exit
++ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
+
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
+ INIT_DATA
+- }
++ } :init
+
+ . = ALIGN(16);
+ .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
+@@ -276,14 +344,6 @@ SECTIONS
+ *(.altinstr_replacement)
+ }
+
+- /*
+- * .exit.text is discard at runtime, not link time, to deal with
+- * references from .altinstructions and .eh_frame
+- */
+- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
+- EXIT_TEXT
+- }
+-
+ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
+ EXIT_DATA
+ }
+@@ -297,7 +357,7 @@ SECTIONS
+ }
+ #endif
+
+-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
++#ifndef CONFIG_SMP
+ PERCPU(PAGE_SIZE)
+ #endif
+
+@@ -320,12 +380,6 @@ SECTIONS
+ . = ALIGN(PAGE_SIZE);
+ }
+
+-#ifdef CONFIG_X86_64
+- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+- NOSAVE_DATA
+- }
+-#endif
+-
+ /* BSS */
+ . = ALIGN(PAGE_SIZE);
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+@@ -341,6 +395,7 @@ SECTIONS
+ __brk_base = .;
+ . += 64 * 1024; /* 64k alignment slop space */
+ *(.brk_reservation) /* areas brk users have reserved */
++ . = ALIGN(PMD_SIZE);
+ __brk_limit = .;
+ }
+
+@@ -369,13 +424,12 @@ SECTIONS
+ * for the boot processor.
+ */
+ #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
+-INIT_PER_CPU(gdt_page);
+ INIT_PER_CPU(irq_stack_union);
+
+ /*
+ * Build-time check on the image size:
+ */
+-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
++. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
+ "kernel image bigger than KERNEL_IMAGE_SIZE");
+
+ #ifdef CONFIG_SMP
+diff -urNp linux-2.6.31.7/arch/x86/kernel/vsyscall_64.c linux-2.6.31.7/arch/x86/kernel/vsyscall_64.c
+--- linux-2.6.31.7/arch/x86/kernel/vsyscall_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/vsyscall_64.c 2009-12-08 17:39:42.910664932 -0500
+@@ -79,6 +79,7 @@ void update_vsyscall(struct timespec *wa
+
+ write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ /* copy vsyscall data */
++ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
+ vsyscall_gtod_data.clock.vread = clock->vread;
+ vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
+ vsyscall_gtod_data.clock.mask = clock->mask;
+@@ -201,7 +202,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
+ We do this here because otherwise user space would do it on
+ its own in a likely inferior way (no access to jiffies).
+ If you don't like it pass NULL. */
+- if (tcache && tcache->blob[0] == (j = __jiffies)) {
++ if (tcache && tcache->blob[0] == (j = jiffies)) {
+ p = tcache->blob[1];
+ } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
+ /* Load per CPU data from RDTSCP */
+@@ -240,13 +241,13 @@ static ctl_table kernel_table2[] = {
+ .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = vsyscall_sysctl_change },
+- {}
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+
+ static ctl_table kernel_root_table2[] = {
+ { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
+ .child = kernel_table2 },
+- {}
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+ #endif
+
+diff -urNp linux-2.6.31.7/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.31.7/arch/x86/kernel/x8664_ksyms_64.c
+--- linux-2.6.31.7/arch/x86/kernel/x8664_ksyms_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/x8664_ksyms_64.c 2009-12-08 17:39:42.911661629 -0500
+@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
+
+ EXPORT_SYMBOL(copy_user_generic);
+ EXPORT_SYMBOL(__copy_user_nocache);
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(__copy_from_user_inatomic);
+
+ EXPORT_SYMBOL(copy_page);
+diff -urNp linux-2.6.31.7/arch/x86/kernel/xsave.c linux-2.6.31.7/arch/x86/kernel/xsave.c
+--- linux-2.6.31.7/arch/x86/kernel/xsave.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kernel/xsave.c 2009-12-08 17:39:42.911661629 -0500
+@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
+ fx_sw_user->xstate_size > fx_sw_user->extended_size)
+ return -1;
+
+- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
++ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
+ fx_sw_user->extended_size -
+ FP_XSTATE_MAGIC2_SIZE));
+ /*
+@@ -196,7 +196,7 @@ fx_only:
+ * the other extended state.
+ */
+ xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
+- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
++ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
+ }
+
+ /*
+@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
+ if (task_thread_info(tsk)->status & TS_XSAVE)
+ err = restore_user_xstate(buf);
+ else
+- err = fxrstor_checking((__force struct i387_fxsave_struct *)
++ err = fxrstor_checking((struct i387_fxsave_struct __user *)
+ buf);
+ if (unlikely(err)) {
+ /*
+diff -urNp linux-2.6.31.7/arch/x86/kvm/svm.c linux-2.6.31.7/arch/x86/kvm/svm.c
+--- linux-2.6.31.7/arch/x86/kvm/svm.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kvm/svm.c 2009-12-08 17:39:42.912660095 -0500
+@@ -2300,9 +2300,12 @@ static int handle_exit(struct kvm_run *k
+ static void reload_tss(struct kvm_vcpu *vcpu)
+ {
+ int cpu = raw_smp_processor_id();
+-
+ struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
++
++ pax_open_kernel();
+ svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
++ pax_close_kernel();
++
+ load_TR_desc();
+ }
+
+@@ -2686,7 +2689,7 @@ static u64 svm_get_mt_mask(struct kvm_vc
+ return 0;
+ }
+
+-static struct kvm_x86_ops svm_x86_ops = {
++static const struct kvm_x86_ops svm_x86_ops = {
+ .cpu_has_kvm_support = has_svm,
+ .disabled_by_bios = is_disabled,
+ .hardware_setup = svm_hardware_setup,
+diff -urNp linux-2.6.31.7/arch/x86/kvm/vmx.c linux-2.6.31.7/arch/x86/kvm/vmx.c
+--- linux-2.6.31.7/arch/x86/kvm/vmx.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kvm/vmx.c 2009-12-08 17:39:42.914665218 -0500
+@@ -521,7 +521,11 @@ static void reload_tss(void)
+
+ kvm_get_gdt(&gdt);
+ descs = (void *)gdt.base;
++
++ pax_open_kernel();
+ descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
++ pax_close_kernel();
++
+ load_TR_desc();
+ }
+
+@@ -1336,8 +1340,11 @@ static __init int hardware_setup(void)
+ if (!cpu_has_vmx_flexpriority())
+ flexpriority_enabled = 0;
+
+- if (!cpu_has_vmx_tpr_shadow())
+- kvm_x86_ops->update_cr8_intercept = NULL;
++ if (!cpu_has_vmx_tpr_shadow()) {
++ pax_open_kernel();
++ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
++ pax_close_kernel();
++ }
+
+ return alloc_kvm_area();
+ }
+@@ -2239,7 +2246,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
+ vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
+
+ asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
+- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
++ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+@@ -3493,6 +3500,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
+ "jmp .Lkvm_vmx_return \n\t"
+ ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
+ ".Lkvm_vmx_return: "
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
++ ".Lkvm_vmx_return2: "
++#endif
++
+ /* Save guest registers, load host registers, keep flags */
+ "xchg %0, (%%"R"sp) \n\t"
+ "mov %%"R"ax, %c[rax](%0) \n\t"
+@@ -3539,6 +3552,11 @@ static void vmx_vcpu_run(struct kvm_vcpu
+ [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
+ #endif
+ [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ ,[cs]"i"(__KERNEL_CS)
++#endif
++
+ : "cc", "memory"
+ , R"bx", R"di", R"si"
+ #ifdef CONFIG_X86_64
+@@ -3555,7 +3573,7 @@ static void vmx_vcpu_run(struct kvm_vcpu
+ if (vmx->rmode.irq.pending)
+ fixup_rmode_irq(vmx);
+
+- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
++ asm("mov %0, %%ds; mov %0, %%es" : : "r"(__KERNEL_DS));
+ vmx->launched = 1;
+
+ vmx_complete_interrupts(vmx);
+@@ -3698,7 +3716,7 @@ static u64 vmx_get_mt_mask(struct kvm_vc
+ return ret;
+ }
+
+-static struct kvm_x86_ops vmx_x86_ops = {
++static const struct kvm_x86_ops vmx_x86_ops = {
+ .cpu_has_kvm_support = cpu_has_kvm_support,
+ .disabled_by_bios = vmx_disabled_by_bios,
+ .hardware_setup = hardware_setup,
+diff -urNp linux-2.6.31.7/arch/x86/kvm/x86.c linux-2.6.31.7/arch/x86/kvm/x86.c
+--- linux-2.6.31.7/arch/x86/kvm/x86.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kvm/x86.c 2009-12-08 17:39:42.920668429 -0500
+@@ -73,42 +73,42 @@ static int kvm_dev_ioctl_get_supported_c
+ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+ u32 function, u32 index);
+
+-struct kvm_x86_ops *kvm_x86_ops;
++const struct kvm_x86_ops *kvm_x86_ops;
+ EXPORT_SYMBOL_GPL(kvm_x86_ops);
+
+ struct kvm_stats_debugfs_item debugfs_entries[] = {
+- { "pf_fixed", VCPU_STAT(pf_fixed) },
+- { "pf_guest", VCPU_STAT(pf_guest) },
+- { "tlb_flush", VCPU_STAT(tlb_flush) },
+- { "invlpg", VCPU_STAT(invlpg) },
+- { "exits", VCPU_STAT(exits) },
+- { "io_exits", VCPU_STAT(io_exits) },
+- { "mmio_exits", VCPU_STAT(mmio_exits) },
+- { "signal_exits", VCPU_STAT(signal_exits) },
+- { "irq_window", VCPU_STAT(irq_window_exits) },
+- { "nmi_window", VCPU_STAT(nmi_window_exits) },
+- { "halt_exits", VCPU_STAT(halt_exits) },
+- { "halt_wakeup", VCPU_STAT(halt_wakeup) },
+- { "hypercalls", VCPU_STAT(hypercalls) },
+- { "request_irq", VCPU_STAT(request_irq_exits) },
+- { "irq_exits", VCPU_STAT(irq_exits) },
+- { "host_state_reload", VCPU_STAT(host_state_reload) },
+- { "efer_reload", VCPU_STAT(efer_reload) },
+- { "fpu_reload", VCPU_STAT(fpu_reload) },
+- { "insn_emulation", VCPU_STAT(insn_emulation) },
+- { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
+- { "irq_injections", VCPU_STAT(irq_injections) },
+- { "nmi_injections", VCPU_STAT(nmi_injections) },
+- { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
+- { "mmu_pte_write", VM_STAT(mmu_pte_write) },
+- { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
+- { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
+- { "mmu_flooded", VM_STAT(mmu_flooded) },
+- { "mmu_recycled", VM_STAT(mmu_recycled) },
+- { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
+- { "mmu_unsync", VM_STAT(mmu_unsync) },
+- { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
+- { "largepages", VM_STAT(lpages) },
++ { "pf_fixed", VCPU_STAT(pf_fixed), NULL },
++ { "pf_guest", VCPU_STAT(pf_guest), NULL },
++ { "tlb_flush", VCPU_STAT(tlb_flush), NULL },
++ { "invlpg", VCPU_STAT(invlpg), NULL },
++ { "exits", VCPU_STAT(exits), NULL },
++ { "io_exits", VCPU_STAT(io_exits), NULL },
++ { "mmio_exits", VCPU_STAT(mmio_exits), NULL },
++ { "signal_exits", VCPU_STAT(signal_exits), NULL },
++ { "irq_window", VCPU_STAT(irq_window_exits), NULL },
++ { "nmi_window", VCPU_STAT(nmi_window_exits), NULL },
++ { "halt_exits", VCPU_STAT(halt_exits), NULL },
++ { "halt_wakeup", VCPU_STAT(halt_wakeup), NULL },
++ { "hypercalls", VCPU_STAT(hypercalls), NULL },
++ { "request_irq", VCPU_STAT(request_irq_exits), NULL },
++ { "irq_exits", VCPU_STAT(irq_exits), NULL },
++ { "host_state_reload", VCPU_STAT(host_state_reload), NULL },
++ { "efer_reload", VCPU_STAT(efer_reload), NULL },
++ { "fpu_reload", VCPU_STAT(fpu_reload), NULL },
++ { "insn_emulation", VCPU_STAT(insn_emulation), NULL },
++ { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail), NULL },
++ { "irq_injections", VCPU_STAT(irq_injections), NULL },
++ { "nmi_injections", VCPU_STAT(nmi_injections), NULL },
++ { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped), NULL },
++ { "mmu_pte_write", VM_STAT(mmu_pte_write), NULL },
++ { "mmu_pte_updated", VM_STAT(mmu_pte_updated), NULL },
++ { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped), NULL },
++ { "mmu_flooded", VM_STAT(mmu_flooded), NULL },
++ { "mmu_recycled", VM_STAT(mmu_recycled), NULL },
++ { "mmu_cache_miss", VM_STAT(mmu_cache_miss), NULL },
++ { "mmu_unsync", VM_STAT(mmu_unsync), NULL },
++ { "remote_tlb_flush", VM_STAT(remote_tlb_flush), NULL },
++ { "largepages", VM_STAT(lpages), NULL },
+ { NULL }
+ };
+
+@@ -1492,7 +1492,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
+ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
+ {
+- if (irq->irq < 0 || irq->irq >= 256)
++ if (irq->irq >= 256)
+ return -EINVAL;
+ if (irqchip_in_kernel(vcpu->kvm))
+ return -ENXIO;
+@@ -2817,10 +2817,10 @@ static struct notifier_block kvmclock_cp
+ .notifier_call = kvmclock_cpufreq_notifier
+ };
+
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ int r, cpu;
+- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
++ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
+
+ if (kvm_x86_ops) {
+ printk(KERN_ERR "kvm: already loaded the other module\n");
+diff -urNp linux-2.6.31.7/arch/x86/kvm/x86_emulate.c linux-2.6.31.7/arch/x86/kvm/x86_emulate.c
+--- linux-2.6.31.7/arch/x86/kvm/x86_emulate.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/kvm/x86_emulate.c 2009-12-08 17:39:42.922547860 -0500
+@@ -382,6 +382,7 @@ static u32 group2_table[] = {
+
+ #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
+ do { \
++ unsigned long _tmp; \
+ __asm__ __volatile__ ( \
+ _PRE_EFLAGS("0", "4", "2") \
+ _op _suffix " %"_x"3,%1; " \
+@@ -395,8 +396,6 @@ static u32 group2_table[] = {
+ /* Raw emulation: instruction has two explicit operands. */
+ #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
+ do { \
+- unsigned long _tmp; \
+- \
+ switch ((_dst).bytes) { \
+ case 2: \
+ ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
+@@ -412,7 +411,6 @@ static u32 group2_table[] = {
+
+ #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
+ do { \
+- unsigned long _tmp; \
+ switch ((_dst).bytes) { \
+ case 1: \
+ ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
+diff -urNp linux-2.6.31.7/arch/x86/lib/checksum_32.S linux-2.6.31.7/arch/x86/lib/checksum_32.S
+--- linux-2.6.31.7/arch/x86/lib/checksum_32.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/lib/checksum_32.S 2009-12-08 17:39:42.923665804 -0500
+@@ -28,7 +28,8 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+ #include <asm/errno.h>
+-
++#include <asm/segment.h>
++
+ /*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+@@ -304,9 +305,22 @@ unsigned int csum_partial_copy_generic (
+
+ #define ARGBASE 16
+ #define FP 12
+-
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+ CFI_STARTPROC
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %es
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp csum_partial_copy_generic
++
++ENTRY(csum_partial_copy_generic_from_user)
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %ds
++ CFI_ADJUST_CFA_OFFSET -4
++
++ENTRY(csum_partial_copy_generic)
+ subl $4,%esp
+ CFI_ADJUST_CFA_OFFSET 4
+ pushl %edi
+@@ -331,7 +345,7 @@ ENTRY(csum_partial_copy_generic)
+ jmp 4f
+ SRC(1: movw (%esi), %bx )
+ addl $2, %esi
+-DST( movw %bx, (%edi) )
++DST( movw %bx, %es:(%edi) )
+ addl $2, %edi
+ addw %bx, %ax
+ adcl $0, %eax
+@@ -343,30 +357,30 @@ DST( movw %bx, (%edi) )
+ SRC(1: movl (%esi), %ebx )
+ SRC( movl 4(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, (%edi) )
++DST( movl %ebx, %es:(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 4(%edi) )
++DST( movl %edx, %es:4(%edi) )
+
+ SRC( movl 8(%esi), %ebx )
+ SRC( movl 12(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 8(%edi) )
++DST( movl %ebx, %es:8(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 12(%edi) )
++DST( movl %edx, %es:12(%edi) )
+
+ SRC( movl 16(%esi), %ebx )
+ SRC( movl 20(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 16(%edi) )
++DST( movl %ebx, %es:16(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 20(%edi) )
++DST( movl %edx, %es:20(%edi) )
+
+ SRC( movl 24(%esi), %ebx )
+ SRC( movl 28(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 24(%edi) )
++DST( movl %ebx, %es:24(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 28(%edi) )
++DST( movl %edx, %es:28(%edi) )
+
+ lea 32(%esi), %esi
+ lea 32(%edi), %edi
+@@ -380,7 +394,7 @@ DST( movl %edx, 28(%edi) )
+ shrl $2, %edx # This clears CF
+ SRC(3: movl (%esi), %ebx )
+ adcl %ebx, %eax
+-DST( movl %ebx, (%edi) )
++DST( movl %ebx, %es:(%edi) )
+ lea 4(%esi), %esi
+ lea 4(%edi), %edi
+ dec %edx
+@@ -392,12 +406,12 @@ DST( movl %ebx, (%edi) )
+ jb 5f
+ SRC( movw (%esi), %cx )
+ leal 2(%esi), %esi
+-DST( movw %cx, (%edi) )
++DST( movw %cx, %es:(%edi) )
+ leal 2(%edi), %edi
+ je 6f
+ shll $16,%ecx
+ SRC(5: movb (%esi), %cl )
+-DST( movb %cl, (%edi) )
++DST( movb %cl, %es:(%edi) )
+ 6: addl %ecx, %eax
+ adcl $0, %eax
+ 7:
+@@ -408,7 +422,7 @@ DST( movb %cl, (%edi) )
+
+ 6001:
+ movl ARGBASE+20(%esp), %ebx # src_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+
+ # zero the complete destination - computing the rest
+ # is too much work
+@@ -421,11 +435,19 @@ DST( movb %cl, (%edi) )
+
+ 6002:
+ movl ARGBASE+24(%esp), %ebx # dst_err_ptr
+- movl $-EFAULT,(%ebx)
++ movl $-EFAULT,%ss:(%ebx)
+ jmp 5000b
+
+ .previous
+
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %ds
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %es
++ CFI_ADJUST_CFA_OFFSET -4
+ popl %ebx
+ CFI_ADJUST_CFA_OFFSET -4
+ CFI_RESTORE ebx
+@@ -439,26 +461,41 @@ DST( movb %cl, (%edi) )
+ CFI_ADJUST_CFA_OFFSET -4
+ ret
+ CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+
+ #else
+
+ /* Version for PentiumII/PPro */
+
+ #define ROUND1(x) \
++ nop; nop; nop; \
+ SRC(movl x(%esi), %ebx ) ; \
+ addl %ebx, %eax ; \
+- DST(movl %ebx, x(%edi) ) ;
++ DST(movl %ebx, %es:x(%edi)) ;
+
+ #define ROUND(x) \
++ nop; nop; nop; \
+ SRC(movl x(%esi), %ebx ) ; \
+ adcl %ebx, %eax ; \
+- DST(movl %ebx, x(%edi) ) ;
++ DST(movl %ebx, %es:x(%edi)) ;
+
+ #define ARGBASE 12
+-
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+ CFI_STARTPROC
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %es
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp csum_partial_copy_generic
++
++ENTRY(csum_partial_copy_generic_from_user)
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %ds
++ CFI_ADJUST_CFA_OFFSET -4
++
++ENTRY(csum_partial_copy_generic)
+ pushl %ebx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ebx, 0
+@@ -482,7 +519,7 @@ ENTRY(csum_partial_copy_generic)
+ subl %ebx, %edi
+ lea -1(%esi),%edx
+ andl $-32,%edx
+- lea 3f(%ebx,%ebx), %ebx
++ lea 3f(%ebx,%ebx,2), %ebx
+ testl %esi, %esi
+ jmp *%ebx
+ 1: addl $64,%esi
+@@ -503,19 +540,19 @@ ENTRY(csum_partial_copy_generic)
+ jb 5f
+ SRC( movw (%esi), %dx )
+ leal 2(%esi), %esi
+-DST( movw %dx, (%edi) )
++DST( movw %dx, %es:(%edi) )
+ leal 2(%edi), %edi
+ je 6f
+ shll $16,%edx
+ 5:
+ SRC( movb (%esi), %dl )
+-DST( movb %dl, (%edi) )
++DST( movb %dl, %es:(%edi) )
+ 6: addl %edx, %eax
+ adcl $0, %eax
+ 7:
+ .section .fixup, "ax"
+ 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+ # zero the complete destination (computing the rest is too much work)
+ movl ARGBASE+8(%esp),%edi # dst
+ movl ARGBASE+12(%esp),%ecx # len
+@@ -523,10 +560,18 @@ DST( movb %dl, (%edi) )
+ rep; stosb
+ jmp 7b
+ 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+ jmp 7b
+ .previous
+
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %ds
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %es
++ CFI_ADJUST_CFA_OFFSET -4
+ popl %esi
+ CFI_ADJUST_CFA_OFFSET -4
+ CFI_RESTORE esi
+@@ -538,7 +583,7 @@ DST( movb %dl, (%edi) )
+ CFI_RESTORE ebx
+ ret
+ CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+
+ #undef ROUND
+ #undef ROUND1
+diff -urNp linux-2.6.31.7/arch/x86/lib/clear_page_64.S linux-2.6.31.7/arch/x86/lib/clear_page_64.S
+--- linux-2.6.31.7/arch/x86/lib/clear_page_64.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/lib/clear_page_64.S 2009-12-08 17:39:42.923665804 -0500
+@@ -43,7 +43,7 @@ ENDPROC(clear_page)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
+ 2:
+diff -urNp linux-2.6.31.7/arch/x86/lib/copy_page_64.S linux-2.6.31.7/arch/x86/lib/copy_page_64.S
+--- linux-2.6.31.7/arch/x86/lib/copy_page_64.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/lib/copy_page_64.S 2009-12-08 17:39:42.923665804 -0500
+@@ -104,7 +104,7 @@ ENDPROC(copy_page)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
+ 2:
+diff -urNp linux-2.6.31.7/arch/x86/lib/copy_user_64.S linux-2.6.31.7/arch/x86/lib/copy_user_64.S
+--- linux-2.6.31.7/arch/x86/lib/copy_user_64.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/lib/copy_user_64.S 2009-12-08 17:39:42.924668955 -0500
+@@ -21,7 +21,7 @@
+ .byte 0xe9 /* 32bit jump */
+ .long \orig-1f /* by default jump to orig */
+ 1:
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 2: .byte 0xe9 /* near jump with 32bit immediate */
+ .long \alt-1b /* offset */ /* or alternatively to alt */
+ .previous
+@@ -64,32 +64,6 @@
+ #endif
+ .endm
+
+-/* Standard copy_to_user with segment limit checking */
+-ENTRY(copy_to_user)
+- CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rdi,%rcx
+- addq %rdx,%rcx
+- jc bad_to_user
+- cmpq TI_addr_limit(%rax),%rcx
+- jae bad_to_user
+- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+- CFI_ENDPROC
+-ENDPROC(copy_to_user)
+-
+-/* Standard copy_from_user with segment limit checking */
+-ENTRY(copy_from_user)
+- CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rsi,%rcx
+- addq %rdx,%rcx
+- jc bad_from_user
+- cmpq TI_addr_limit(%rax),%rcx
+- jae bad_from_user
+- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+- CFI_ENDPROC
+-ENDPROC(copy_from_user)
+-
+ ENTRY(copy_user_generic)
+ CFI_STARTPROC
+ ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+@@ -107,6 +81,8 @@ ENDPROC(__copy_from_user_inatomic)
+ ENTRY(bad_from_user)
+ bad_from_user:
+ CFI_STARTPROC
++ testl %edx,%edx
++ js bad_to_user
+ movl %edx,%ecx
+ xorl %eax,%eax
+ rep
+diff -urNp linux-2.6.31.7/arch/x86/lib/getuser.S linux-2.6.31.7/arch/x86/lib/getuser.S
+--- linux-2.6.31.7/arch/x86/lib/getuser.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/lib/getuser.S 2009-12-08 17:39:42.924668955 -0500
+@@ -33,6 +33,7 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm.h>
++#include <asm/segment.h>
+
+ .text
+ ENTRY(__get_user_1)
+@@ -40,7 +41,19 @@ ENTRY(__get_user_1)
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 1: movzb (%_ASM_AX),%edx
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ pop %ds
++#endif
++
+ xor %eax,%eax
+ ret
+ CFI_ENDPROC
+@@ -53,7 +66,19 @@ ENTRY(__get_user_2)
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 2: movzwl -1(%_ASM_AX),%edx
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ pop %ds
++#endif
++
+ xor %eax,%eax
+ ret
+ CFI_ENDPROC
+@@ -66,7 +91,19 @@ ENTRY(__get_user_4)
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 3: mov -3(%_ASM_AX),%edx
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ pop %ds
++#endif
++
+ xor %eax,%eax
+ ret
+ CFI_ENDPROC
+@@ -89,6 +126,12 @@ ENDPROC(__get_user_8)
+
+ bad_get_user:
+ CFI_STARTPROC
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ pop %ds
++#endif
++
+ xor %edx,%edx
+ mov $(-EFAULT),%_ASM_AX
+ ret
+diff -urNp linux-2.6.31.7/arch/x86/lib/memcpy_64.S linux-2.6.31.7/arch/x86/lib/memcpy_64.S
+--- linux-2.6.31.7/arch/x86/lib/memcpy_64.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/lib/memcpy_64.S 2009-12-08 17:39:42.924668955 -0500
+@@ -128,7 +128,7 @@ ENDPROC(__memcpy)
+ * It is also a lot simpler. Use this when possible:
+ */
+
+- .section .altinstr_replacement, "ax"
++ .section .altinstr_replacement, "a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
+ 2:
+diff -urNp linux-2.6.31.7/arch/x86/lib/memset_64.S linux-2.6.31.7/arch/x86/lib/memset_64.S
+--- linux-2.6.31.7/arch/x86/lib/memset_64.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/lib/memset_64.S 2009-12-08 17:39:42.924668955 -0500
+@@ -118,7 +118,7 @@ ENDPROC(__memset)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (memset_c - memset) - (2f - 1b) /* offset */
+ 2:
+diff -urNp linux-2.6.31.7/arch/x86/lib/mmx_32.c linux-2.6.31.7/arch/x86/lib/mmx_32.c
+--- linux-2.6.31.7/arch/x86/lib/mmx_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/lib/mmx_32.c 2009-12-08 17:39:42.926529586 -0500
+@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
+ {
+ void *p;
+ int i;
++ unsigned long cr0;
+
+ if (unlikely(in_interrupt()))
+ return __memcpy(to, from, len);
+@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+- "1: prefetch (%0)\n" /* This set is 28 bytes */
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n" /* This set is 28 bytes */
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from));
++ : "=&r" (cr0) : "r" (from) : "ax");
+
+ for ( ; i > 5; i--) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movq 8(%0), %%mm1\n"
+- " movq 16(%0), %%mm2\n"
+- " movq 24(%0), %%mm3\n"
+- " movq %%mm0, (%1)\n"
+- " movq %%mm1, 8(%1)\n"
+- " movq %%mm2, 16(%1)\n"
+- " movq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm0\n"
+- " movq 40(%0), %%mm1\n"
+- " movq 48(%0), %%mm2\n"
+- " movq 56(%0), %%mm3\n"
+- " movq %%mm0, 32(%1)\n"
+- " movq %%mm1, 40(%1)\n"
+- " movq %%mm2, 48(%1)\n"
+- " movq %%mm3, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movq 8(%1), %%mm1\n"
++ " movq 16(%1), %%mm2\n"
++ " movq 24(%1), %%mm3\n"
++ " movq %%mm0, (%2)\n"
++ " movq %%mm1, 8(%2)\n"
++ " movq %%mm2, 16(%2)\n"
++ " movq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm0\n"
++ " movq 40(%1), %%mm1\n"
++ " movq 48(%1), %%mm2\n"
++ " movq 56(%1), %%mm3\n"
++ " movq %%mm0, 32(%2)\n"
++ " movq %%mm1, 40(%2)\n"
++ " movq %%mm2, 48(%2)\n"
++ " movq %%mm3, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from), "r" (to) : "memory");
++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+ int i;
++ unsigned long cr0;
+
+ kernel_fpu_begin();
+
+@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
+ * but that is for later. -AV
+ */
+ __asm__ __volatile__(
+- "1: prefetch (%0)\n"
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n"
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from));
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+
+ for (i = 0; i < (4096-320)/64; i++) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movntq %%mm0, (%1)\n"
+- " movq 8(%0), %%mm1\n"
+- " movntq %%mm1, 8(%1)\n"
+- " movq 16(%0), %%mm2\n"
+- " movntq %%mm2, 16(%1)\n"
+- " movq 24(%0), %%mm3\n"
+- " movntq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm4\n"
+- " movntq %%mm4, 32(%1)\n"
+- " movq 40(%0), %%mm5\n"
+- " movntq %%mm5, 40(%1)\n"
+- " movq 48(%0), %%mm6\n"
+- " movntq %%mm6, 48(%1)\n"
+- " movq 56(%0), %%mm7\n"
+- " movntq %%mm7, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movntq %%mm0, (%2)\n"
++ " movq 8(%1), %%mm1\n"
++ " movntq %%mm1, 8(%2)\n"
++ " movq 16(%1), %%mm2\n"
++ " movntq %%mm2, 16(%2)\n"
++ " movq 24(%1), %%mm3\n"
++ " movntq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm4\n"
++ " movntq %%mm4, 32(%2)\n"
++ " movq 40(%1), %%mm5\n"
++ " movntq %%mm5, 40(%2)\n"
++ " movq 48(%1), %%mm6\n"
++ " movntq %%mm6, 48(%2)\n"
++ " movq 56(%1), %%mm7\n"
++ " movntq %%mm7, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+ int i;
++ unsigned long cr0;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+- "1: prefetch (%0)\n"
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n"
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from));
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+
+ for (i = 0; i < 4096/64; i++) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movq 8(%0), %%mm1\n"
+- " movq 16(%0), %%mm2\n"
+- " movq 24(%0), %%mm3\n"
+- " movq %%mm0, (%1)\n"
+- " movq %%mm1, 8(%1)\n"
+- " movq %%mm2, 16(%1)\n"
+- " movq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm0\n"
+- " movq 40(%0), %%mm1\n"
+- " movq 48(%0), %%mm2\n"
+- " movq 56(%0), %%mm3\n"
+- " movq %%mm0, 32(%1)\n"
+- " movq %%mm1, 40(%1)\n"
+- " movq %%mm2, 48(%1)\n"
+- " movq %%mm3, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movq 8(%1), %%mm1\n"
++ " movq 16(%1), %%mm2\n"
++ " movq 24(%1), %%mm3\n"
++ " movq %%mm0, (%2)\n"
++ " movq %%mm1, 8(%2)\n"
++ " movq %%mm2, 16(%2)\n"
++ " movq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm0\n"
++ " movq 40(%1), %%mm1\n"
++ " movq 48(%1), %%mm2\n"
++ " movq 56(%1), %%mm3\n"
++ " movq %%mm0, 32(%2)\n"
++ " movq %%mm1, 40(%2)\n"
++ " movq %%mm2, 48(%2)\n"
++ " movq %%mm3, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from), "r" (to) : "memory");
++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+diff -urNp linux-2.6.31.7/arch/x86/lib/putuser.S linux-2.6.31.7/arch/x86/lib/putuser.S
+--- linux-2.6.31.7/arch/x86/lib/putuser.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/lib/putuser.S 2009-12-08 17:39:42.926529586 -0500
+@@ -15,6 +15,7 @@
+ #include <asm/thread_info.h>
+ #include <asm/errno.h>
+ #include <asm/asm.h>
++#include <asm/segment.h>
+
+
+ /*
+@@ -39,7 +40,19 @@ ENTRY(__put_user_1)
+ ENTER
+ cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
+ jae bad_put_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 1: movb %al,(%_ASM_CX)
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ popl %ds
++#endif
++
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_1)
+@@ -50,7 +63,19 @@ ENTRY(__put_user_2)
+ sub $1,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 2: movw %ax,(%_ASM_CX)
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ popl %ds
++#endif
++
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_2)
+@@ -61,7 +86,19 @@ ENTRY(__put_user_4)
+ sub $3,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 3: movl %eax,(%_ASM_CX)
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ popl %ds
++#endif
++
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_4)
+@@ -72,16 +109,34 @@ ENTRY(__put_user_8)
+ sub $7,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 4: mov %_ASM_AX,(%_ASM_CX)
+ #ifdef CONFIG_X86_32
+ 5: movl %edx,4(%_ASM_CX)
+ #endif
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ popl %ds
++#endif
++
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_8)
+
+ bad_put_user:
+ CFI_STARTPROC
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ popl %ds
++#endif
++
+ movl $-EFAULT,%eax
+ EXIT
+ END(bad_put_user)
+diff -urNp linux-2.6.31.7/arch/x86/lib/usercopy_32.c linux-2.6.31.7/arch/x86/lib/usercopy_32.c
+--- linux-2.6.31.7/arch/x86/lib/usercopy_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/lib/usercopy_32.c 2009-12-08 17:39:42.928663785 -0500
+@@ -36,31 +36,38 @@ static inline int __movsl_is_ok(unsigned
+ * Copy a null terminated string from userspace.
+ */
+
+-#define __do_strncpy_from_user(dst, src, count, res) \
+-do { \
+- int __d0, __d1, __d2; \
+- might_fault(); \
+- __asm__ __volatile__( \
+- " testl %1,%1\n" \
+- " jz 2f\n" \
+- "0: lodsb\n" \
+- " stosb\n" \
+- " testb %%al,%%al\n" \
+- " jz 1f\n" \
+- " decl %1\n" \
+- " jnz 0b\n" \
+- "1: subl %1,%0\n" \
+- "2:\n" \
+- ".section .fixup,\"ax\"\n" \
+- "3: movl %5,%0\n" \
+- " jmp 2b\n" \
+- ".previous\n" \
+- _ASM_EXTABLE(0b,3b) \
+- : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
+- "=&D" (__d2) \
+- : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
+- : "memory"); \
+-} while (0)
++static long __do_strncpy_from_user(char *dst, const char __user *src, long count)
++{
++ int __d0, __d1, __d2;
++ long res = -EFAULT;
++
++ might_fault();
++ __asm__ __volatile__(
++ " movw %w10,%%ds\n"
++ " testl %1,%1\n"
++ " jz 2f\n"
++ "0: lodsb\n"
++ " stosb\n"
++ " testb %%al,%%al\n"
++ " jz 1f\n"
++ " decl %1\n"
++ " jnz 0b\n"
++ "1: subl %1,%0\n"
++ "2:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
++ ".section .fixup,\"ax\"\n"
++ "3: movl %5,%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ _ASM_EXTABLE(0b,3b)
++ : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),
++ "=&D" (__d2)
++ : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst),
++ "r"(__USER_DS)
++ : "memory");
++ return res;
++}
+
+ /**
+ * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
+@@ -85,9 +92,7 @@ do { \
+ long
+ __strncpy_from_user(char *dst, const char __user *src, long count)
+ {
+- long res;
+- __do_strncpy_from_user(dst, src, count, res);
+- return res;
++ return __do_strncpy_from_user(dst, src, count);
+ }
+ EXPORT_SYMBOL(__strncpy_from_user);
+
+@@ -114,7 +119,7 @@ strncpy_from_user(char *dst, const char
+ {
+ long res = -EFAULT;
+ if (access_ok(VERIFY_READ, src, 1))
+- __do_strncpy_from_user(dst, src, count, res);
++ res = __do_strncpy_from_user(dst, src, count);
+ return res;
+ }
+ EXPORT_SYMBOL(strncpy_from_user);
+@@ -123,24 +128,30 @@ EXPORT_SYMBOL(strncpy_from_user);
+ * Zero Userspace
+ */
+
+-#define __do_clear_user(addr,size) \
+-do { \
+- int __d0; \
+- might_fault(); \
+- __asm__ __volatile__( \
+- "0: rep; stosl\n" \
+- " movl %2,%0\n" \
+- "1: rep; stosb\n" \
+- "2:\n" \
+- ".section .fixup,\"ax\"\n" \
+- "3: lea 0(%2,%0,4),%0\n" \
+- " jmp 2b\n" \
+- ".previous\n" \
+- _ASM_EXTABLE(0b,3b) \
+- _ASM_EXTABLE(1b,2b) \
+- : "=&c"(size), "=&D" (__d0) \
+- : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
+-} while (0)
++static unsigned long __do_clear_user(void __user *addr, unsigned long size)
++{
++ int __d0;
++
++ might_fault();
++ __asm__ __volatile__(
++ " movw %w6,%%es\n"
++ "0: rep; stosl\n"
++ " movl %2,%0\n"
++ "1: rep; stosb\n"
++ "2:\n"
++ " pushl %%ss\n"
++ " popl %%es\n"
++ ".section .fixup,\"ax\"\n"
++ "3: lea 0(%2,%0,4),%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ _ASM_EXTABLE(0b,3b)
++ _ASM_EXTABLE(1b,2b)
++ : "=&c"(size), "=&D" (__d0)
++ : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0),
++ "r"(__USER_DS));
++ return size;
++}
+
+ /**
+ * clear_user: - Zero a block of memory in user space.
+@@ -157,7 +168,7 @@ clear_user(void __user *to, unsigned lon
+ {
+ might_fault();
+ if (access_ok(VERIFY_WRITE, to, n))
+- __do_clear_user(to, n);
++ n = __do_clear_user(to, n);
+ return n;
+ }
+ EXPORT_SYMBOL(clear_user);
+@@ -176,8 +187,7 @@ EXPORT_SYMBOL(clear_user);
+ unsigned long
+ __clear_user(void __user *to, unsigned long n)
+ {
+- __do_clear_user(to, n);
+- return n;
++ return __do_clear_user(to, n);
+ }
+ EXPORT_SYMBOL(__clear_user);
+
+@@ -200,14 +210,17 @@ long strnlen_user(const char __user *s,
+ might_fault();
+
+ __asm__ __volatile__(
++ " movw %w8,%%es\n"
+ " testl %0, %0\n"
+ " jz 3f\n"
+- " andl %0,%%ecx\n"
++ " movl %0,%%ecx\n"
+ "0: repne; scasb\n"
+ " setne %%al\n"
+ " subl %%ecx,%0\n"
+ " addl %0,%%eax\n"
+ "1:\n"
++ " pushl %%ss\n"
++ " popl %%es\n"
+ ".section .fixup,\"ax\"\n"
+ "2: xorl %%eax,%%eax\n"
+ " jmp 1b\n"
+@@ -219,7 +232,7 @@ long strnlen_user(const char __user *s,
+ " .long 0b,2b\n"
+ ".previous"
+ :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
+- :"0" (n), "1" (s), "2" (0), "3" (mask)
++ :"0" (n), "1" (s), "2" (0), "3" (mask), "r" (__USER_DS)
+ :"cc");
+ return res & mask;
+ }
+@@ -227,10 +240,11 @@ EXPORT_SYMBOL(strnlen_user);
+
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+ static unsigned long
+-__copy_user_intel(void __user *to, const void *from, unsigned long size)
++__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
+ {
+ int d0, d1;
+ __asm__ __volatile__(
++ " movw %w6, %%es\n"
+ " .align 2,0x90\n"
+ "1: movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+@@ -239,36 +253,36 @@ __copy_user_intel(void __user *to, const
+ " .align 2,0x90\n"
+ "3: movl 0(%4), %%eax\n"
+ "4: movl 4(%4), %%edx\n"
+- "5: movl %%eax, 0(%3)\n"
+- "6: movl %%edx, 4(%3)\n"
++ "5: movl %%eax, %%es:0(%3)\n"
++ "6: movl %%edx, %%es:4(%3)\n"
+ "7: movl 8(%4), %%eax\n"
+ "8: movl 12(%4),%%edx\n"
+- "9: movl %%eax, 8(%3)\n"
+- "10: movl %%edx, 12(%3)\n"
++ "9: movl %%eax, %%es:8(%3)\n"
++ "10: movl %%edx, %%es:12(%3)\n"
+ "11: movl 16(%4), %%eax\n"
+ "12: movl 20(%4), %%edx\n"
+- "13: movl %%eax, 16(%3)\n"
+- "14: movl %%edx, 20(%3)\n"
++ "13: movl %%eax, %%es:16(%3)\n"
++ "14: movl %%edx, %%es:20(%3)\n"
+ "15: movl 24(%4), %%eax\n"
+ "16: movl 28(%4), %%edx\n"
+- "17: movl %%eax, 24(%3)\n"
+- "18: movl %%edx, 28(%3)\n"
++ "17: movl %%eax, %%es:24(%3)\n"
++ "18: movl %%edx, %%es:28(%3)\n"
+ "19: movl 32(%4), %%eax\n"
+ "20: movl 36(%4), %%edx\n"
+- "21: movl %%eax, 32(%3)\n"
+- "22: movl %%edx, 36(%3)\n"
++ "21: movl %%eax, %%es:32(%3)\n"
++ "22: movl %%edx, %%es:36(%3)\n"
+ "23: movl 40(%4), %%eax\n"
+ "24: movl 44(%4), %%edx\n"
+- "25: movl %%eax, 40(%3)\n"
+- "26: movl %%edx, 44(%3)\n"
++ "25: movl %%eax, %%es:40(%3)\n"
++ "26: movl %%edx, %%es:44(%3)\n"
+ "27: movl 48(%4), %%eax\n"
+ "28: movl 52(%4), %%edx\n"
+- "29: movl %%eax, 48(%3)\n"
+- "30: movl %%edx, 52(%3)\n"
++ "29: movl %%eax, %%es:48(%3)\n"
++ "30: movl %%edx, %%es:52(%3)\n"
+ "31: movl 56(%4), %%eax\n"
+ "32: movl 60(%4), %%edx\n"
+- "33: movl %%eax, 56(%3)\n"
+- "34: movl %%edx, 60(%3)\n"
++ "33: movl %%eax, %%es:56(%3)\n"
++ "34: movl %%edx, %%es:60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+@@ -282,6 +296,8 @@ __copy_user_intel(void __user *to, const
+ "36: movl %%eax, %0\n"
+ "37: rep; movsb\n"
+ "100:\n"
++ " pushl %%ss\n"
++ " popl %%es\n"
+ ".section .fixup,\"ax\"\n"
+ "101: lea 0(%%eax,%0,4),%0\n"
+ " jmp 100b\n"
+@@ -328,7 +344,117 @@ __copy_user_intel(void __user *to, const
+ " .long 99b,101b\n"
+ ".previous"
+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
+- : "1"(to), "2"(from), "0"(size)
++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
++ : "eax", "edx", "memory");
++ return size;
++}
++
++static unsigned long
++__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
++{
++ int d0, d1;
++ __asm__ __volatile__(
++ " movw %w6, %%ds\n"
++ " .align 2,0x90\n"
++ "1: movl 32(%4), %%eax\n"
++ " cmpl $67, %0\n"
++ " jbe 3f\n"
++ "2: movl 64(%4), %%eax\n"
++ " .align 2,0x90\n"
++ "3: movl 0(%4), %%eax\n"
++ "4: movl 4(%4), %%edx\n"
++ "5: movl %%eax, %%es:0(%3)\n"
++ "6: movl %%edx, %%es:4(%3)\n"
++ "7: movl 8(%4), %%eax\n"
++ "8: movl 12(%4),%%edx\n"
++ "9: movl %%eax, %%es:8(%3)\n"
++ "10: movl %%edx, %%es:12(%3)\n"
++ "11: movl 16(%4), %%eax\n"
++ "12: movl 20(%4), %%edx\n"
++ "13: movl %%eax, %%es:16(%3)\n"
++ "14: movl %%edx, %%es:20(%3)\n"
++ "15: movl 24(%4), %%eax\n"
++ "16: movl 28(%4), %%edx\n"
++ "17: movl %%eax, %%es:24(%3)\n"
++ "18: movl %%edx, %%es:28(%3)\n"
++ "19: movl 32(%4), %%eax\n"
++ "20: movl 36(%4), %%edx\n"
++ "21: movl %%eax, %%es:32(%3)\n"
++ "22: movl %%edx, %%es:36(%3)\n"
++ "23: movl 40(%4), %%eax\n"
++ "24: movl 44(%4), %%edx\n"
++ "25: movl %%eax, %%es:40(%3)\n"
++ "26: movl %%edx, %%es:44(%3)\n"
++ "27: movl 48(%4), %%eax\n"
++ "28: movl 52(%4), %%edx\n"
++ "29: movl %%eax, %%es:48(%3)\n"
++ "30: movl %%edx, %%es:52(%3)\n"
++ "31: movl 56(%4), %%eax\n"
++ "32: movl 60(%4), %%edx\n"
++ "33: movl %%eax, %%es:56(%3)\n"
++ "34: movl %%edx, %%es:60(%3)\n"
++ " addl $-64, %0\n"
++ " addl $64, %4\n"
++ " addl $64, %3\n"
++ " cmpl $63, %0\n"
++ " ja 1b\n"
++ "35: movl %0, %%eax\n"
++ " shrl $2, %0\n"
++ " andl $3, %%eax\n"
++ " cld\n"
++ "99: rep; movsl\n"
++ "36: movl %%eax, %0\n"
++ "37: rep; movsb\n"
++ "100:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
++ ".section .fixup,\"ax\"\n"
++ "101: lea 0(%%eax,%0,4),%0\n"
++ " jmp 100b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,100b\n"
++ " .long 2b,100b\n"
++ " .long 3b,100b\n"
++ " .long 4b,100b\n"
++ " .long 5b,100b\n"
++ " .long 6b,100b\n"
++ " .long 7b,100b\n"
++ " .long 8b,100b\n"
++ " .long 9b,100b\n"
++ " .long 10b,100b\n"
++ " .long 11b,100b\n"
++ " .long 12b,100b\n"
++ " .long 13b,100b\n"
++ " .long 14b,100b\n"
++ " .long 15b,100b\n"
++ " .long 16b,100b\n"
++ " .long 17b,100b\n"
++ " .long 18b,100b\n"
++ " .long 19b,100b\n"
++ " .long 20b,100b\n"
++ " .long 21b,100b\n"
++ " .long 22b,100b\n"
++ " .long 23b,100b\n"
++ " .long 24b,100b\n"
++ " .long 25b,100b\n"
++ " .long 26b,100b\n"
++ " .long 27b,100b\n"
++ " .long 28b,100b\n"
++ " .long 29b,100b\n"
++ " .long 30b,100b\n"
++ " .long 31b,100b\n"
++ " .long 32b,100b\n"
++ " .long 33b,100b\n"
++ " .long 34b,100b\n"
++ " .long 35b,100b\n"
++ " .long 36b,100b\n"
++ " .long 37b,100b\n"
++ " .long 99b,101b\n"
++ ".previous"
++ : "=&c"(size), "=&D" (d0), "=&S" (d1)
++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
+ : "eax", "edx", "memory");
+ return size;
+ }
+@@ -338,6 +464,7 @@ __copy_user_zeroing_intel(void *to, cons
+ {
+ int d0, d1;
+ __asm__ __volatile__(
++ " movw %w6, %%ds\n"
+ " .align 2,0x90\n"
+ "0: movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+@@ -346,36 +473,36 @@ __copy_user_zeroing_intel(void *to, cons
+ " .align 2,0x90\n"
+ "2: movl 0(%4), %%eax\n"
+ "21: movl 4(%4), %%edx\n"
+- " movl %%eax, 0(%3)\n"
+- " movl %%edx, 4(%3)\n"
++ " movl %%eax, %%es:0(%3)\n"
++ " movl %%edx, %%es:4(%3)\n"
+ "3: movl 8(%4), %%eax\n"
+ "31: movl 12(%4),%%edx\n"
+- " movl %%eax, 8(%3)\n"
+- " movl %%edx, 12(%3)\n"
++ " movl %%eax, %%es:8(%3)\n"
++ " movl %%edx, %%es:12(%3)\n"
+ "4: movl 16(%4), %%eax\n"
+ "41: movl 20(%4), %%edx\n"
+- " movl %%eax, 16(%3)\n"
+- " movl %%edx, 20(%3)\n"
++ " movl %%eax, %%es:16(%3)\n"
++ " movl %%edx, %%es:20(%3)\n"
+ "10: movl 24(%4), %%eax\n"
+ "51: movl 28(%4), %%edx\n"
+- " movl %%eax, 24(%3)\n"
+- " movl %%edx, 28(%3)\n"
++ " movl %%eax, %%es:24(%3)\n"
++ " movl %%edx, %%es:28(%3)\n"
+ "11: movl 32(%4), %%eax\n"
+ "61: movl 36(%4), %%edx\n"
+- " movl %%eax, 32(%3)\n"
+- " movl %%edx, 36(%3)\n"
++ " movl %%eax, %%es:32(%3)\n"
++ " movl %%edx, %%es:36(%3)\n"
+ "12: movl 40(%4), %%eax\n"
+ "71: movl 44(%4), %%edx\n"
+- " movl %%eax, 40(%3)\n"
+- " movl %%edx, 44(%3)\n"
++ " movl %%eax, %%es:40(%3)\n"
++ " movl %%edx, %%es:44(%3)\n"
+ "13: movl 48(%4), %%eax\n"
+ "81: movl 52(%4), %%edx\n"
+- " movl %%eax, 48(%3)\n"
+- " movl %%edx, 52(%3)\n"
++ " movl %%eax, %%es:48(%3)\n"
++ " movl %%edx, %%es:52(%3)\n"
+ "14: movl 56(%4), %%eax\n"
+ "91: movl 60(%4), %%edx\n"
+- " movl %%eax, 56(%3)\n"
+- " movl %%edx, 60(%3)\n"
++ " movl %%eax, %%es:56(%3)\n"
++ " movl %%edx, %%es:60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+@@ -389,6 +516,8 @@ __copy_user_zeroing_intel(void *to, cons
+ " movl %%eax,%0\n"
+ "7: rep; movsb\n"
+ "8:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+ "16: pushl %0\n"
+@@ -423,7 +552,7 @@ __copy_user_zeroing_intel(void *to, cons
+ " .long 7b,16b\n"
+ ".previous"
+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
+- : "1"(to), "2"(from), "0"(size)
++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
+ : "eax", "edx", "memory");
+ return size;
+ }
+@@ -439,6 +568,7 @@ static unsigned long __copy_user_zeroing
+ int d0, d1;
+
+ __asm__ __volatile__(
++ " movw %w6, %%ds\n"
+ " .align 2,0x90\n"
+ "0: movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+@@ -447,36 +577,36 @@ static unsigned long __copy_user_zeroing
+ " .align 2,0x90\n"
+ "2: movl 0(%4), %%eax\n"
+ "21: movl 4(%4), %%edx\n"
+- " movnti %%eax, 0(%3)\n"
+- " movnti %%edx, 4(%3)\n"
++ " movnti %%eax, %%es:0(%3)\n"
++ " movnti %%edx, %%es:4(%3)\n"
+ "3: movl 8(%4), %%eax\n"
+ "31: movl 12(%4),%%edx\n"
+- " movnti %%eax, 8(%3)\n"
+- " movnti %%edx, 12(%3)\n"
++ " movnti %%eax, %%es:8(%3)\n"
++ " movnti %%edx, %%es:12(%3)\n"
+ "4: movl 16(%4), %%eax\n"
+ "41: movl 20(%4), %%edx\n"
+- " movnti %%eax, 16(%3)\n"
+- " movnti %%edx, 20(%3)\n"
++ " movnti %%eax, %%es:16(%3)\n"
++ " movnti %%edx, %%es:20(%3)\n"
+ "10: movl 24(%4), %%eax\n"
+ "51: movl 28(%4), %%edx\n"
+- " movnti %%eax, 24(%3)\n"
+- " movnti %%edx, 28(%3)\n"
++ " movnti %%eax, %%es:24(%3)\n"
++ " movnti %%edx, %%es:28(%3)\n"
+ "11: movl 32(%4), %%eax\n"
+ "61: movl 36(%4), %%edx\n"
+- " movnti %%eax, 32(%3)\n"
+- " movnti %%edx, 36(%3)\n"
++ " movnti %%eax, %%es:32(%3)\n"
++ " movnti %%edx, %%es:36(%3)\n"
+ "12: movl 40(%4), %%eax\n"
+ "71: movl 44(%4), %%edx\n"
+- " movnti %%eax, 40(%3)\n"
+- " movnti %%edx, 44(%3)\n"
++ " movnti %%eax, %%es:40(%3)\n"
++ " movnti %%edx, %%es:44(%3)\n"
+ "13: movl 48(%4), %%eax\n"
+ "81: movl 52(%4), %%edx\n"
+- " movnti %%eax, 48(%3)\n"
+- " movnti %%edx, 52(%3)\n"
++ " movnti %%eax, %%es:48(%3)\n"
++ " movnti %%edx, %%es:52(%3)\n"
+ "14: movl 56(%4), %%eax\n"
+ "91: movl 60(%4), %%edx\n"
+- " movnti %%eax, 56(%3)\n"
+- " movnti %%edx, 60(%3)\n"
++ " movnti %%eax, %%es:56(%3)\n"
++ " movnti %%edx, %%es:60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+@@ -491,6 +621,8 @@ static unsigned long __copy_user_zeroing
+ " movl %%eax,%0\n"
+ "7: rep; movsb\n"
+ "8:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+ "16: pushl %0\n"
+@@ -525,7 +657,7 @@ static unsigned long __copy_user_zeroing
+ " .long 7b,16b\n"
+ ".previous"
+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
+- : "1"(to), "2"(from), "0"(size)
++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
+ : "eax", "edx", "memory");
+ return size;
+ }
+@@ -536,6 +668,7 @@ static unsigned long __copy_user_intel_n
+ int d0, d1;
+
+ __asm__ __volatile__(
++ " movw %w6, %%ds\n"
+ " .align 2,0x90\n"
+ "0: movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+@@ -544,36 +677,36 @@ static unsigned long __copy_user_intel_n
+ " .align 2,0x90\n"
+ "2: movl 0(%4), %%eax\n"
+ "21: movl 4(%4), %%edx\n"
+- " movnti %%eax, 0(%3)\n"
+- " movnti %%edx, 4(%3)\n"
++ " movnti %%eax, %%es:0(%3)\n"
++ " movnti %%edx, %%es:4(%3)\n"
+ "3: movl 8(%4), %%eax\n"
+ "31: movl 12(%4),%%edx\n"
+- " movnti %%eax, 8(%3)\n"
+- " movnti %%edx, 12(%3)\n"
++ " movnti %%eax, %%es:8(%3)\n"
++ " movnti %%edx, %%es:12(%3)\n"
+ "4: movl 16(%4), %%eax\n"
+ "41: movl 20(%4), %%edx\n"
+- " movnti %%eax, 16(%3)\n"
+- " movnti %%edx, 20(%3)\n"
++ " movnti %%eax, %%es:16(%3)\n"
++ " movnti %%edx, %%es:20(%3)\n"
+ "10: movl 24(%4), %%eax\n"
+ "51: movl 28(%4), %%edx\n"
+- " movnti %%eax, 24(%3)\n"
+- " movnti %%edx, 28(%3)\n"
++ " movnti %%eax, %%es:24(%3)\n"
++ " movnti %%edx, %%es:28(%3)\n"
+ "11: movl 32(%4), %%eax\n"
+ "61: movl 36(%4), %%edx\n"
+- " movnti %%eax, 32(%3)\n"
+- " movnti %%edx, 36(%3)\n"
++ " movnti %%eax, %%es:32(%3)\n"
++ " movnti %%edx, %%es:36(%3)\n"
+ "12: movl 40(%4), %%eax\n"
+ "71: movl 44(%4), %%edx\n"
+- " movnti %%eax, 40(%3)\n"
+- " movnti %%edx, 44(%3)\n"
++ " movnti %%eax, %%es:40(%3)\n"
++ " movnti %%edx, %%es:44(%3)\n"
+ "13: movl 48(%4), %%eax\n"
+ "81: movl 52(%4), %%edx\n"
+- " movnti %%eax, 48(%3)\n"
+- " movnti %%edx, 52(%3)\n"
++ " movnti %%eax, %%es:48(%3)\n"
++ " movnti %%edx, %%es:52(%3)\n"
+ "14: movl 56(%4), %%eax\n"
+ "91: movl 60(%4), %%edx\n"
+- " movnti %%eax, 56(%3)\n"
+- " movnti %%edx, 60(%3)\n"
++ " movnti %%eax, %%es:56(%3)\n"
++ " movnti %%edx, %%es:60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+@@ -588,6 +721,8 @@ static unsigned long __copy_user_intel_n
+ " movl %%eax,%0\n"
+ "7: rep; movsb\n"
+ "8:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+ "16: jmp 8b\n"
+@@ -616,7 +751,7 @@ static unsigned long __copy_user_intel_n
+ " .long 7b,16b\n"
+ ".previous"
+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
+- : "1"(to), "2"(from), "0"(size)
++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
+ : "eax", "edx", "memory");
+ return size;
+ }
+@@ -629,90 +764,146 @@ static unsigned long __copy_user_intel_n
+ */
+ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
+ unsigned long size);
+-unsigned long __copy_user_intel(void __user *to, const void *from,
++unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
++ unsigned long size);
++unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
+ unsigned long size);
+ unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ const void __user *from, unsigned long size);
+ #endif /* CONFIG_X86_INTEL_USERCOPY */
+
+ /* Generic arbitrary sized copy. */
+-#define __copy_user(to, from, size) \
+-do { \
+- int __d0, __d1, __d2; \
+- __asm__ __volatile__( \
+- " cmp $7,%0\n" \
+- " jbe 1f\n" \
+- " movl %1,%0\n" \
+- " negl %0\n" \
+- " andl $7,%0\n" \
+- " subl %0,%3\n" \
+- "4: rep; movsb\n" \
+- " movl %3,%0\n" \
+- " shrl $2,%0\n" \
+- " andl $3,%3\n" \
+- " .align 2,0x90\n" \
+- "0: rep; movsl\n" \
+- " movl %3,%0\n" \
+- "1: rep; movsb\n" \
+- "2:\n" \
+- ".section .fixup,\"ax\"\n" \
+- "5: addl %3,%0\n" \
+- " jmp 2b\n" \
+- "3: lea 0(%3,%0,4),%0\n" \
+- " jmp 2b\n" \
+- ".previous\n" \
+- ".section __ex_table,\"a\"\n" \
+- " .align 4\n" \
+- " .long 4b,5b\n" \
+- " .long 0b,3b\n" \
+- " .long 1b,2b\n" \
+- ".previous" \
+- : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
+- : "3"(size), "0"(size), "1"(to), "2"(from) \
+- : "memory"); \
+-} while (0)
+-
+-#define __copy_user_zeroing(to, from, size) \
+-do { \
+- int __d0, __d1, __d2; \
+- __asm__ __volatile__( \
+- " cmp $7,%0\n" \
+- " jbe 1f\n" \
+- " movl %1,%0\n" \
+- " negl %0\n" \
+- " andl $7,%0\n" \
+- " subl %0,%3\n" \
+- "4: rep; movsb\n" \
+- " movl %3,%0\n" \
+- " shrl $2,%0\n" \
+- " andl $3,%3\n" \
+- " .align 2,0x90\n" \
+- "0: rep; movsl\n" \
+- " movl %3,%0\n" \
+- "1: rep; movsb\n" \
+- "2:\n" \
+- ".section .fixup,\"ax\"\n" \
+- "5: addl %3,%0\n" \
+- " jmp 6f\n" \
+- "3: lea 0(%3,%0,4),%0\n" \
+- "6: pushl %0\n" \
+- " pushl %%eax\n" \
+- " xorl %%eax,%%eax\n" \
+- " rep; stosb\n" \
+- " popl %%eax\n" \
+- " popl %0\n" \
+- " jmp 2b\n" \
+- ".previous\n" \
+- ".section __ex_table,\"a\"\n" \
+- " .align 4\n" \
+- " .long 4b,5b\n" \
+- " .long 0b,3b\n" \
+- " .long 1b,6b\n" \
+- ".previous" \
+- : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
+- : "3"(size), "0"(size), "1"(to), "2"(from) \
+- : "memory"); \
+-} while (0)
++static unsigned long
++__generic_copy_to_user(void __user *to, const void *from, unsigned long size)
++{
++ int __d0, __d1, __d2;
++
++ __asm__ __volatile__(
++ " movw %w8,%%es\n"
++ " cmp $7,%0\n"
++ " jbe 1f\n"
++ " movl %1,%0\n"
++ " negl %0\n"
++ " andl $7,%0\n"
++ " subl %0,%3\n"
++ "4: rep; movsb\n"
++ " movl %3,%0\n"
++ " shrl $2,%0\n"
++ " andl $3,%3\n"
++ " .align 2,0x90\n"
++ "0: rep; movsl\n"
++ " movl %3,%0\n"
++ "1: rep; movsb\n"
++ "2:\n"
++ " pushl %%ss\n"
++ " popl %%es\n"
++ ".section .fixup,\"ax\"\n"
++ "5: addl %3,%0\n"
++ " jmp 2b\n"
++ "3: lea 0(%3,%0,4),%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 4b,5b\n"
++ " .long 0b,3b\n"
++ " .long 1b,2b\n"
++ ".previous"
++ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
++ : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
++ : "memory");
++ return size;
++}
++
++static unsigned long
++__generic_copy_from_user(void *to, const void __user *from, unsigned long size)
++{
++ int __d0, __d1, __d2;
++
++ __asm__ __volatile__(
++ " movw %w8,%%ds\n"
++ " cmp $7,%0\n"
++ " jbe 1f\n"
++ " movl %1,%0\n"
++ " negl %0\n"
++ " andl $7,%0\n"
++ " subl %0,%3\n"
++ "4: rep; movsb\n"
++ " movl %3,%0\n"
++ " shrl $2,%0\n"
++ " andl $3,%3\n"
++ " .align 2,0x90\n"
++ "0: rep; movsl\n"
++ " movl %3,%0\n"
++ "1: rep; movsb\n"
++ "2:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
++ ".section .fixup,\"ax\"\n"
++ "5: addl %3,%0\n"
++ " jmp 2b\n"
++ "3: lea 0(%3,%0,4),%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 4b,5b\n"
++ " .long 0b,3b\n"
++ " .long 1b,2b\n"
++ ".previous"
++ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
++ : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
++ : "memory");
++ return size;
++}
++
++static unsigned long
++__copy_user_zeroing(void *to, const void __user *from, unsigned long size)
++{
++ int __d0, __d1, __d2;
++
++ __asm__ __volatile__(
++ " movw %w8,%%ds\n"
++ " cmp $7,%0\n"
++ " jbe 1f\n"
++ " movl %1,%0\n"
++ " negl %0\n"
++ " andl $7,%0\n"
++ " subl %0,%3\n"
++ "4: rep; movsb\n"
++ " movl %3,%0\n"
++ " shrl $2,%0\n"
++ " andl $3,%3\n"
++ " .align 2,0x90\n"
++ "0: rep; movsl\n"
++ " movl %3,%0\n"
++ "1: rep; movsb\n"
++ "2:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
++ ".section .fixup,\"ax\"\n"
++ "5: addl %3,%0\n"
++ " jmp 6f\n"
++ "3: lea 0(%3,%0,4),%0\n"
++ "6: pushl %0\n"
++ " pushl %%eax\n"
++ " xorl %%eax,%%eax\n"
++ " rep; stosb\n"
++ " popl %%eax\n"
++ " popl %0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 4b,5b\n"
++ " .long 0b,3b\n"
++ " .long 1b,6b\n"
++ ".previous"
++ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
++ : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
++ : "memory");
++ return size;
++}
+
+ unsigned long __copy_to_user_ll(void __user *to, const void *from,
+ unsigned long n)
+@@ -775,9 +966,9 @@ survive:
+ }
+ #endif
+ if (movsl_is_ok(to, from, n))
+- __copy_user(to, from, n);
++ n = __generic_copy_to_user(to, from, n);
+ else
+- n = __copy_user_intel(to, from, n);
++ n = __generic_copy_to_user_intel(to, from, n);
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_to_user_ll);
+@@ -786,7 +977,7 @@ unsigned long __copy_from_user_ll(void *
+ unsigned long n)
+ {
+ if (movsl_is_ok(to, from, n))
+- __copy_user_zeroing(to, from, n);
++ n = __copy_user_zeroing(to, from, n);
+ else
+ n = __copy_user_zeroing_intel(to, from, n);
+ return n;
+@@ -797,10 +988,9 @@ unsigned long __copy_from_user_ll_nozero
+ unsigned long n)
+ {
+ if (movsl_is_ok(to, from, n))
+- __copy_user(to, from, n);
++ n = __generic_copy_from_user(to, from, n);
+ else
+- n = __copy_user_intel((void __user *)to,
+- (const void *)from, n);
++ n = __generic_copy_from_user_intel(to, from, n);
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
+@@ -812,9 +1002,9 @@ unsigned long __copy_from_user_ll_nocach
+ if (n > 64 && cpu_has_xmm2)
+ n = __copy_user_zeroing_intel_nocache(to, from, n);
+ else
+- __copy_user_zeroing(to, from, n);
++ n = __copy_user_zeroing(to, from, n);
+ #else
+- __copy_user_zeroing(to, from, n);
++ n = __copy_user_zeroing(to, from, n);
+ #endif
+ return n;
+ }
+@@ -827,59 +1017,40 @@ unsigned long __copy_from_user_ll_nocach
+ if (n > 64 && cpu_has_xmm2)
+ n = __copy_user_intel_nocache(to, from, n);
+ else
+- __copy_user(to, from, n);
++ n = __generic_copy_from_user(to, from, n);
+ #else
+- __copy_user(to, from, n);
++ n = __generic_copy_from_user(to, from, n);
+ #endif
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
+
+-/**
+- * copy_to_user: - Copy a block of data into user space.
+- * @to: Destination address, in user space.
+- * @from: Source address, in kernel space.
+- * @n: Number of bytes to copy.
+- *
+- * Context: User context only. This function may sleep.
+- *
+- * Copy data from kernel space to user space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- */
+-unsigned long
+-copy_to_user(void __user *to, const void *from, unsigned long n)
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++void __set_fs(mm_segment_t x, int cpu)
+ {
+- if (access_ok(VERIFY_WRITE, to, n))
+- n = __copy_to_user(to, from, n);
+- return n;
++ unsigned long limit = x.seg;
++ struct desc_struct d;
++
++ current_thread_info()->addr_limit = x;
++ if (unlikely(paravirt_enabled()))
++ return;
++
++ if (likely(limit))
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++ pack_descriptor(&d, 0UL, limit, 0xF3, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_DS, &d, DESCTYPE_S);
+ }
+-EXPORT_SYMBOL(copy_to_user);
+
+-/**
+- * copy_from_user: - Copy a block of data from user space.
+- * @to: Destination address, in kernel space.
+- * @from: Source address, in user space.
+- * @n: Number of bytes to copy.
+- *
+- * Context: User context only. This function may sleep.
+- *
+- * Copy data from user space to kernel space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- *
+- * If some data could not be copied, this function will pad the copied
+- * data to the requested size using zero bytes.
+- */
+-unsigned long
+-copy_from_user(void *to, const void __user *from, unsigned long n)
++void set_fs(mm_segment_t x)
+ {
+- if (access_ok(VERIFY_READ, from, n))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
++ __set_fs(x, get_cpu());
++ put_cpu();
+ }
+-EXPORT_SYMBOL(copy_from_user);
++#else
++void set_fs(mm_segment_t x)
++{
++ current_thread_info()->addr_limit = x;
++}
++#endif
++
++EXPORT_SYMBOL(set_fs);
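The hunks above change the 32-bit user-copy primitives in two ways: each helper now returns the number of bytes it could not copy or clear, so clear_user(), __copy_from_user_ll() and friends propagate an accurate residual count, and every user access is bracketed by a temporary reload of %es or %ds with __USER_DS; under PAX_MEMORY_UDEREF the new __set_fs() repacks that segment's GDT descriptor limit from the current addr_limit so userland accesses cannot reach kernel addresses. A minimal sketch of how a caller consumes the standard return-value contract of these primitives (the helper and its request layout are hypothetical; only the copy_from_user()/clear_user() semantics are taken from the code above):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* hypothetical request layout, for illustration only */
struct example_req {
	u32 flags;
	u32 len;
};

static long example_read_req(void __user *ubuf)
{
	struct example_req req;

	/* copy_from_user() returns the number of bytes NOT copied */
	if (copy_from_user(&req, ubuf, sizeof(req)))
		return -EFAULT;

	/* clear_user() likewise returns the residual byte count */
	if (clear_user(ubuf, sizeof(req)))
		return -EFAULT;

	return 0;
}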
+diff -urNp linux-2.6.31.7/arch/x86/Makefile linux-2.6.31.7/arch/x86/Makefile
+--- linux-2.6.31.7/arch/x86/Makefile 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/Makefile 2009-12-08 17:39:42.793644779 -0500
+@@ -188,3 +188,12 @@ define archhelp
+ echo ' FDARGS="..." arguments for the booted kernel'
+ echo ' FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
++
++archprepare:
++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+diff -urNp linux-2.6.31.7/arch/x86/mm/extable.c linux-2.6.31.7/arch/x86/mm/extable.c
+--- linux-2.6.31.7/arch/x86/mm/extable.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/extable.c 2009-12-08 17:39:42.928663785 -0500
+@@ -1,14 +1,71 @@
+ #include <linux/module.h>
+ #include <linux/spinlock.h>
++#include <linux/sort.h>
+ #include <asm/uaccess.h>
++#include <asm/pgtable.h>
+
++/*
++ * The exception table needs to be sorted so that the binary
++ * search that we use to find entries in it works properly.
++ * This is used both for the kernel exception table and for
++ * the exception tables of modules that get loaded.
++ */
++static int cmp_ex(const void *a, const void *b)
++{
++ const struct exception_table_entry *x = a, *y = b;
++
++ /* avoid overflow */
++ if (x->insn > y->insn)
++ return 1;
++ if (x->insn < y->insn)
++ return -1;
++ return 0;
++}
++
++static void swap_ex(void *a, void *b, int size)
++{
++ struct exception_table_entry t, *x = a, *y = b;
++
++ t = *x;
++
++ pax_open_kernel();
++ *x = *y;
++ *y = t;
++ pax_close_kernel();
++}
++
++void sort_extable(struct exception_table_entry *start,
++ struct exception_table_entry *finish)
++{
++ sort(start, finish - start, sizeof(struct exception_table_entry),
++ cmp_ex, swap_ex);
++}
++
++#ifdef CONFIG_MODULES
++/*
++ * If the exception table is sorted, any referring to the module init
++ * will be at the beginning or the end.
++ */
++void trim_init_extable(struct module *m)
++{
++ /*trim the beginning*/
++ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
++ m->extable++;
++ m->num_exentries--;
++ }
++ /*trim the end*/
++ while (m->num_exentries &&
++ within_module_init(m->extable[m->num_exentries-1].insn, m))
++ m->num_exentries--;
++}
++#endif /* CONFIG_MODULES */
+
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fixup;
+
+ #ifdef CONFIG_PNPBIOS
+- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
++ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
+ extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
+ extern u32 pnp_bios_is_utter_crap;
+ pnp_bios_is_utter_crap = 1;
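The sort_extable() added above keeps the exception table ordered so the kernel's binary search over it stays valid, for the core table and for loaded modules alike; note that cmp_ex() compares the two addresses instead of returning their difference, because subtracting two unsigned long insn values and truncating to the int result could report the wrong sign. A freestanding illustration of the same comparator pattern with the standard qsort() (struct and sample values are made up):

#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned long insn;	/* stand-in for an exception-table address */
};

/* compare, do not subtract: the difference may not fit in the int result */
static int cmp_entry(const void *a, const void *b)
{
	const struct entry *x = a, *y = b;

	if (x->insn > y->insn)
		return 1;
	if (x->insn < y->insn)
		return -1;
	return 0;
}

int main(void)
{
	struct entry tab[] = { { 0xc0403000UL }, { 0xc0101000UL }, { 0xc0202000UL } };
	size_t i;

	qsort(tab, sizeof(tab) / sizeof(tab[0]), sizeof(tab[0]), cmp_entry);
	for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
		printf("%#lx\n", tab[i].insn);
	return 0;
}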
+diff -urNp linux-2.6.31.7/arch/x86/mm/fault.c linux-2.6.31.7/arch/x86/mm/fault.c
+--- linux-2.6.31.7/arch/x86/mm/fault.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/fault.c 2009-12-08 17:39:42.929666999 -0500
+@@ -11,10 +11,14 @@
+ #include <linux/kprobes.h> /* __kprobes, ... */
+ #include <linux/mmiotrace.h> /* kmmio_handler, ... */
+ #include <linux/perf_counter.h> /* perf_swcounter_event */
++#include <linux/unistd.h>
++#include <linux/compiler.h>
+
+ #include <asm/traps.h> /* dotraplinkage, ... */
+ #include <asm/pgalloc.h> /* pgd_*(), ... */
+ #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
++#include <asm/vsyscall.h>
++#include <asm/tlbflush.h>
+
+ /*
+ * Page fault error code bits:
+@@ -51,7 +55,7 @@ static inline int notify_page_fault(stru
+ int ret = 0;
+
+ /* kprobe_running() needs smp_processor_id() */
+- if (kprobes_built_in() && !user_mode_vm(regs)) {
++ if (kprobes_built_in() && !user_mode(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, 14))
+ ret = 1;
+@@ -171,6 +175,30 @@ force_sig_info_fault(int si_signo, int s
+ force_sig_info(si_signo, &info, tsk);
+ }
+
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault(struct pt_regs *regs);
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset(mm, address);
++ if (!pgd_present(*pgd))
++ return NULL;
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return NULL;
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return NULL;
++ return pmd;
++}
++#endif
++
+ DEFINE_SPINLOCK(pgd_lock);
+ LIST_HEAD(pgd_list);
+
+@@ -543,7 +571,7 @@ static int is_errata93(struct pt_regs *r
+ static int is_errata100(struct pt_regs *regs, unsigned long address)
+ {
+ #ifdef CONFIG_X86_64
+- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
+ return 1;
+ #endif
+ return 0;
+@@ -570,7 +598,7 @@ static int is_f00f_bug(struct pt_regs *r
+ }
+
+ static const char nx_warning[] = KERN_CRIT
+-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
++"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
+
+ static void
+ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -579,15 +607,27 @@ show_fault_oops(struct pt_regs *regs, un
+ if (!oops_may_print())
+ return;
+
+- if (error_code & PF_INSTR) {
++ if (nx_enabled && (error_code & PF_INSTR)) {
+ unsigned int level;
+
+ pte_t *pte = lookup_address(address, &level);
+
+ if (pte && pte_present(*pte) && !pte_exec(*pte))
+- printk(nx_warning, current_uid());
++ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++ if (init_mm.start_code <= address && address < init_mm.end_code)
++ {
++ if (current->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %u.%u.%u.%u: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
++ NIPQUAD(current->signal->curr_ip), current->comm, task_pid_nr(current), current_uid(), current_euid());
++ else
++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
++ current->comm, task_pid_nr(current), current_uid(), current_euid());
++ }
++#endif
++
+ printk(KERN_ALERT "BUG: unable to handle kernel ");
+ if (address < PAGE_SIZE)
+ printk(KERN_CONT "NULL pointer dereference");
+@@ -712,6 +752,68 @@ __bad_area_nosemaphore(struct pt_regs *r
+ unsigned long address, int si_code)
+ {
+ struct task_struct *tsk = current;
++ struct mm_struct *mm = tsk->mm;
++
++#ifdef CONFIG_X86_64
++ if (mm && (error_code & PF_INSTR)) {
++ if (regs->ip == (unsigned long)vgettimeofday) {
++ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
++ return;
++ } else if (regs->ip == (unsigned long)vtime) {
++ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
++ return;
++ } else if (regs->ip == (unsigned long)vgetcpu) {
++ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
++ return;
++ }
++ }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (mm && (error_code & PF_USER)) {
++ unsigned long ip = regs->ip;
++
++ if (v8086_mode(regs))
++ ip = ((regs->cs & 0xffff) << 4) + (regs->ip & 0xffff);
++
++ /*
++ * It's possible to have interrupts off here:
++ */
++ local_irq_enable();
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
++ ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && regs->ip == address))) {
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (regs->ip + SEGMEXEC_TASK_SIZE == address)) {
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
++ }
++#endif
+
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & PF_USER) {
+@@ -846,6 +948,106 @@ static int spurious_fault_check(unsigned
+ return 1;
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
++{
++ pte_t *pte;
++ pmd_t *pmd;
++ spinlock_t *ptl;
++ unsigned char pte_mask;
++
++ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
++ !(mm->pax_flags & MF_PAX_PAGEEXEC))
++ return 0;
++
++ /* PaX: it's our fault, let's handle it if we can */
++
++ /* PaX: take a look at read faults before acquiring any locks */
++ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
++ /* instruction fetch attempt from a protected page in user mode */
++ up_read(&mm->mmap_sem);
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return 1;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++
++ pmd = pax_get_pmd(mm, address);
++ if (unlikely(!pmd))
++ return 0;
++
++ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
++ pte_unmap_unlock(pte, ptl);
++ return 0;
++ }
++
++ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
++ /* write attempt to a protected page in user mode */
++ pte_unmap_unlock(pte, ptl);
++ return 0;
++ }
++
++#ifdef CONFIG_SMP
++ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
++#else
++ if (likely(address > get_limit(regs->cs)))
++#endif
++ {
++ set_pte(pte, pte_mkread(*pte));
++ __flush_tlb_one(address);
++ pte_unmap_unlock(pte, ptl);
++ up_read(&mm->mmap_sem);
++ return 1;
++ }
++
++ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
++
++ /*
++ * PaX: fill DTLB with user rights and retry
++ */
++ __asm__ __volatile__ (
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ "movw %w4,%%es\n"
++#endif
++ "orb %2,(%1)\n"
++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
++/*
++ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
++ * page fault when examined during a TLB load attempt. this is true not only
++ * for PTEs holding a non-present entry but also present entries that will
++ * raise a page fault (such as those set up by PaX, or the copy-on-write
++ * mechanism). in effect it means that we do *not* need to flush the TLBs
++ * for our target pages since their PTEs are simply not in the TLBs at all.
++
++ * the best thing in omitting it is that we gain around 15-20% speed in the
++ * fast path of the page fault handler and can get rid of tracing since we
++ * can no longer flush unintended entries.
++ */
++ "invlpg (%0)\n"
++#endif
++ "testb $0,%%es:(%0)\n"
++ "xorb %3,(%1)\n"
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ "pushl %%ss\n"
++ "popl %%es\n"
++#endif
++ :
++ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER), "r" (__USER_DS)
++ : "memory", "cc");
++ pte_unmap_unlock(pte, ptl);
++ up_read(&mm->mmap_sem);
++ return 1;
++}
++#endif
++
+ /*
+ * Handle a spurious fault caused by a stale TLB entry.
+ *
+@@ -912,6 +1114,9 @@ int show_unhandled_signals = 1;
+ static inline int
+ access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
+ {
++ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
++ return 1;
++
+ if (write) {
+ /* write, present and write, not present: */
+ if (unlikely(!(vma->vm_flags & VM_WRITE)))
+@@ -945,17 +1150,16 @@ do_page_fault(struct pt_regs *regs, unsi
+ {
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+- unsigned long address;
+ struct mm_struct *mm;
+ int write;
+ int fault;
+
++ /* Get the faulting address: */
++ const unsigned long address = read_cr2();
++
+ tsk = current;
+ mm = tsk->mm;
+
+- /* Get the faulting address: */
+- address = read_cr2();
+-
+ /*
+ * Detect and handle instructions that would cause a page fault for
+ * both a tracked kernel page and a userspace page.
+@@ -1015,7 +1219,7 @@ do_page_fault(struct pt_regs *regs, unsi
+ * User-mode registers count as a user access even for any
+ * potential system fault or CPU buglet:
+ */
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ local_irq_enable();
+ error_code |= PF_USER;
+ } else {
+@@ -1069,6 +1273,11 @@ do_page_fault(struct pt_regs *regs, unsi
+ might_sleep();
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
++ return;
++#endif
++
+ vma = find_vma(mm, address);
+ if (unlikely(!vma)) {
+ bad_area(regs, error_code, address);
+@@ -1080,18 +1289,24 @@ do_page_fault(struct pt_regs *regs, unsi
+ bad_area(regs, error_code, address);
+ return;
+ }
+- if (error_code & PF_USER) {
+- /*
+- * Accessing the stack below %sp is always a bug.
+- * The large cushion allows instructions like enter
+- * and pusha to work. ("enter $65535, $31" pushes
+- * 32 pointers and then decrements %sp by 65535.)
+- */
+- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
+- bad_area(regs, error_code, address);
+- return;
+- }
++ /*
++ * Accessing the stack below %sp is always a bug.
++ * The large cushion allows instructions like enter
++ * and pusha to work. ("enter $65535, $31" pushes
++ * 32 pointers and then decrements %sp by 65535.)
++ */
++ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
++ bad_area(regs, error_code, address);
++ return;
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
++ bad_area(regs, error_code, address);
++ return;
++ }
++#endif
++
+ if (unlikely(expand_stack(vma, address))) {
+ bad_area(regs, error_code, address);
+ return;
+@@ -1135,3 +1350,174 @@ good_area:
+
+ up_read(&mm->mmap_sem);
+ }
++
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault_32(struct pt_regs *regs)
++{
++ int err;
++
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned char mov1, mov2;
++ unsigned short jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 11) >> 32)
++ break;
++#endif
++
++ err = get_user(mov1, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
++ regs->cx = addr1;
++ regs->ax = addr2;
++ regs->ip = addr2;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned char mov, jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 9) >> 32)
++ break;
++#endif
++
++ err = get_user(mov, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++ if (err)
++ break;
++
++ if (mov == 0xB9 && jmp == 0xE9) {
++ regs->cx = addr1;
++ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++ return 2;
++ }
++ } while (0);
++
++ return 1; /* PaX in action */
++}
++
++#ifdef CONFIG_X86_64
++static int pax_handle_fetch_fault_64(struct pt_regs *regs)
++{
++ int err;
++
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char jmp2;
++ unsigned int addr1;
++ unsigned long addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char jmp2;
++ unsigned long addr1, addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
++ return 1; /* PaX in action */
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->ip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when gcc trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ if (v8086_mode(regs))
++ return 1;
++
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++
++#ifdef CONFIG_X86_32
++ return pax_handle_fetch_fault_32(regs);
++#else
++ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
++ return pax_handle_fetch_fault_32(regs);
++ else
++ return pax_handle_fetch_fault_64(regs);
++#endif
++}
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_insns(void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (__force unsigned char __user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
++ for (i = -1; i < 80 / sizeof(long); i++) {
++ unsigned long c;
++ if (get_user(c, (__force unsigned long __user *)sp+i))
++#ifdef CONFIG_X86_32
++ printk(KERN_CONT "???????? ");
++#else
++ printk(KERN_CONT "???????????????? ");
++#endif
++ else
++ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
++ }
++ printk("\n");
++}
++#endif
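The pax_handle_fetch_fault_32()/_64() helpers above recognise the short instruction sequences gcc emits for nested-function trampolines (immediate loads of the static-chain register and of the target address, followed by a jump) and emulate them from the page-fault handler, so PAGEEXEC/SEGMEXEC can keep the stack non-executable without breaking such code. For context, a minimal user-space example of the GNU C construct that makes gcc place one of these trampolines on the stack (illustrative only; nested functions are a GNU extension, so build with gcc):

#include <stdio.h>

static void run(void (*fn)(void))
{
	fn();
}

int main(void)
{
	int local = 42;

	/* nested function that uses the enclosing frame: taking its
	   address below forces gcc to build a stack trampoline */
	void show(void)
	{
		printf("%d\n", local);
	}

	run(show);
	return 0;
}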
+diff -urNp linux-2.6.31.7/arch/x86/mm/gup.c linux-2.6.31.7/arch/x86/mm/gup.c
+--- linux-2.6.31.7/arch/x86/mm/gup.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/gup.c 2009-12-08 17:39:42.930682699 -0500
+@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
++ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+ (void __user *)start, len)))
+ return 0;
+
+diff -urNp linux-2.6.31.7/arch/x86/mm/highmem_32.c linux-2.6.31.7/arch/x86/mm/highmem_32.c
+--- linux-2.6.31.7/arch/x86/mm/highmem_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/highmem_32.c 2009-12-08 17:39:42.930682699 -0500
+@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
++
++ pax_open_kernel();
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
++ pax_close_kernel();
+
+ return (void *)vaddr;
+ }
+diff -urNp linux-2.6.31.7/arch/x86/mm/hugetlbpage.c linux-2.6.31.7/arch/x86/mm/hugetlbpage.c
+--- linux-2.6.31.7/arch/x86/mm/hugetlbpage.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/hugetlbpage.c 2009-12-08 17:39:42.931662815 -0500
+@@ -267,13 +267,18 @@ static unsigned long hugetlb_get_unmappe
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+- unsigned long start_addr;
++ unsigned long start_addr, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
+
+ if (len > mm->cached_hole_size) {
+- start_addr = mm->free_area_cache;
++ start_addr = mm->free_area_cache;
+ } else {
+- start_addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -281,13 +286,13 @@ full_search:
+
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+- if (TASK_SIZE - len < addr) {
++ if (pax_task_size - len < addr) {
+ /*
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+@@ -310,9 +315,8 @@ static unsigned long hugetlb_get_unmappe
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma, *prev_vma;
+- unsigned long base = mm->mmap_base, addr = addr0;
++ unsigned long base = mm->mmap_base, addr;
+ unsigned long largest_hole = mm->cached_hole_size;
+- int first_time = 1;
+
+ /* don't allow allocations above current base */
+ if (mm->free_area_cache > base)
+@@ -322,7 +326,7 @@ static unsigned long hugetlb_get_unmappe
+ largest_hole = 0;
+ mm->free_area_cache = base;
+ }
+-try_again:
++
+ /* make sure it can fit in the remaining address space */
+ if (mm->free_area_cache < len)
+ goto fail;
+@@ -364,22 +368,26 @@ try_again:
+
+ fail:
+ /*
+- * if hint left us with no space for the requested
+- * mapping then try again:
+- */
+- if (first_time) {
+- mm->free_area_cache = base;
+- largest_hole = 0;
+- first_time = 0;
+- goto try_again;
+- }
+- /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+ addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
+ len, pgoff, flags);
+@@ -387,6 +395,7 @@ fail:
+ /*
+ * Restore the topdown base:
+ */
++ mm->mmap_base = base;
+ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+@@ -400,10 +409,17 @@ hugetlb_get_unmapped_area(struct file *f
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+- if (len > TASK_SIZE)
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED) {
+@@ -415,7 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
+ if (addr) {
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
++ if (pax_task_size - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+diff -urNp linux-2.6.31.7/arch/x86/mm/init_32.c linux-2.6.31.7/arch/x86/mm/init_32.c
+--- linux-2.6.31.7/arch/x86/mm/init_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/init_32.c 2009-12-08 17:39:42.932667914 -0500
+@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
+ }
+
+ /*
+- * Creates a middle page table and puts a pointer to it in the
+- * given global directory entry. This only returns the gd entry
+- * in non-PAE compilation mode, since the middle layer is folded.
+- */
+-static pmd_t * __init one_md_table_init(pgd_t *pgd)
+-{
+- pud_t *pud;
+- pmd_t *pmd_table;
+-
+-#ifdef CONFIG_X86_PAE
+- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
+- if (after_bootmem)
+- pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+- else
+- pmd_table = (pmd_t *)alloc_low_page();
+- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
+- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+- pud = pud_offset(pgd, 0);
+- BUG_ON(pmd_table != pmd_offset(pud, 0));
+-
+- return pmd_table;
+- }
+-#endif
+- pud = pud_offset(pgd, 0);
+- pmd_table = pmd_offset(pud, 0);
+-
+- return pmd_table;
+-}
+-
+-/*
+ * Create a page table and place a pointer to it in a middle page
+ * directory entry:
+ */
+@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
+ page_table = (pte_t *)alloc_low_page();
+
+ paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
++#else
+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++#endif
+ BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+ }
+
+ return pte_offset_kernel(pmd, 0);
+ }
+
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++ pud_t *pud;
++ pmd_t *pmd_table;
++
++ pud = pud_offset(pgd, 0);
++ pmd_table = pmd_offset(pud, 0);
++
++ return pmd_table;
++}
++
+ pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+ {
+ int pgd_idx = pgd_index(vaddr);
+@@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
+ int pgd_idx, pmd_idx;
+ unsigned long vaddr;
+ pgd_t *pgd;
++ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte = NULL;
+
+@@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
+ pgd = pgd_base + pgd_idx;
+
+ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
+- pmd = one_md_table_init(pgd);
+- pmd = pmd + pmd_index(vaddr);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++
++#ifdef CONFIG_X86_PAE
++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
++
+ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+ pmd++, pmd_idx++) {
+ pte = page_table_kmap_check(one_page_table_init(pmd),
+@@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
+ }
+ }
+
+-static inline int is_kernel_text(unsigned long addr)
++static inline int is_kernel_text(unsigned long start, unsigned long end)
+ {
+- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
+- return 1;
+- return 0;
++ if ((start > ktla_ktva((unsigned long)_etext) ||
++ end <= ktla_ktva((unsigned long)_stext)) &&
++ (start > ktla_ktva((unsigned long)_einittext) ||
++ end <= ktla_ktva((unsigned long)_sinittext)) &&
++
++#ifdef CONFIG_ACPI_SLEEP
++ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
++#endif
++
++ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
++ return 0;
++ return 1;
+ }
+
+ /*
+@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
+ int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
+ unsigned long start_pfn, end_pfn;
+ pgd_t *pgd_base = swapper_pg_dir;
+- int pgd_idx, pmd_idx, pte_ofs;
++ unsigned int pgd_idx, pmd_idx, pte_ofs;
+ unsigned long pfn;
+ pgd_t *pgd;
++ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned pages_2m, pages_4k;
+@@ -278,8 +279,13 @@ repeat:
+ pfn = start_pfn;
+ pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+ pgd = pgd_base + pgd_idx;
+- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+- pmd = one_md_table_init(pgd);
++ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
++ pud = pud_offset(pgd, 0);
++ pmd = pmd_offset(pud, 0);
++
++#ifdef CONFIG_X86_PAE
++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
+
+ if (pfn >= end_pfn)
+ continue;
+@@ -291,14 +297,13 @@ repeat:
+ #endif
+ for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
+ pmd++, pmd_idx++) {
+- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
++ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
+
+ /*
+ * Map with big pages if possible, otherwise
+ * create normal page tables:
+ */
+ if (use_pse) {
+- unsigned int addr2;
+ pgprot_t prot = PAGE_KERNEL_LARGE;
+ /*
+ * first pass will use the same initial
+@@ -308,11 +313,7 @@ repeat:
+ __pgprot(PTE_IDENT_ATTR |
+ _PAGE_PSE);
+
+- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
+- PAGE_OFFSET + PAGE_SIZE-1;
+-
+- if (is_kernel_text(addr) ||
+- is_kernel_text(addr2))
++ if (is_kernel_text(address, address + PMD_SIZE))
+ prot = PAGE_KERNEL_LARGE_EXEC;
+
+ pages_2m++;
+@@ -329,7 +330,7 @@ repeat:
+ pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+ pte += pte_ofs;
+ for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
+- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
++ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
+ pgprot_t prot = PAGE_KERNEL;
+ /*
+ * first pass will use the same initial
+@@ -337,7 +338,7 @@ repeat:
+ */
+ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
+
+- if (is_kernel_text(addr))
++ if (is_kernel_text(address, address + PAGE_SIZE))
+ prot = PAGE_KERNEL_EXEC;
+
+ pages_4k++;
+@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
+
+ pud = pud_offset(pgd, va);
+ pmd = pmd_offset(pud, va);
+- if (!pmd_present(*pmd))
++ if (!pmd_present(*pmd) || pmd_huge(*pmd))
+ break;
+
+ pte = pte_offset_kernel(pmd, va);
+@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
+
+ static void __init pagetable_init(void)
+ {
+- pgd_t *pgd_base = swapper_pg_dir;
+-
+- permanent_kmaps_init(pgd_base);
++ permanent_kmaps_init(swapper_pg_dir);
+ }
+
+ #ifdef CONFIG_ACPI_SLEEP
+@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
+ * ACPI suspend needs this for resume, because things like the intel-agp
+ * driver might have split up a kernel 4MB mapping.
+ */
+-char swsusp_pg_dir[PAGE_SIZE]
++pgd_t swsusp_pg_dir[PTRS_PER_PGD]
+ __attribute__ ((aligned(PAGE_SIZE)));
+
+ static inline void save_pg_dir(void)
+ {
+- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
++ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
+ }
+ #else /* !CONFIG_ACPI_SLEEP */
+ static inline void save_pg_dir(void)
+@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
+ flush_tlb_all();
+ }
+
+-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+ /* user-defined highmem size */
+@@ -883,7 +882,7 @@ void __init mem_init(void)
+ set_highmem_pages_init();
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+@@ -929,10 +928,10 @@ void __init mem_init(void)
+ ((unsigned long)&__init_end -
+ (unsigned long)&__init_begin) >> 10,
+
+- (unsigned long)&_etext, (unsigned long)&_edata,
+- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
++ (unsigned long)&_sdata, (unsigned long)&_edata,
++ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
+
+- (unsigned long)&_text, (unsigned long)&_etext,
++ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
+ ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+ /*
+diff -urNp linux-2.6.31.7/arch/x86/mm/init_64.c linux-2.6.31.7/arch/x86/mm/init_64.c
+--- linux-2.6.31.7/arch/x86/mm/init_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/init_64.c 2009-12-08 17:39:42.933667530 -0500
+@@ -163,7 +163,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
+ pmd = fill_pmd(pud, vaddr);
+ pte = fill_pte(pmd, vaddr);
+
++ pax_open_kernel();
+ set_pte(pte, new_pte);
++ pax_close_kernel();
+
+ /*
+ * It's enough to flush this one mapping.
+@@ -222,14 +224,12 @@ static void __init __init_extra_mapping(
+ pgd = pgd_offset_k((unsigned long)__va(phys));
+ if (pgd_none(*pgd)) {
+ pud = (pud_t *) spp_getpage();
+- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
+- _PAGE_USER));
++ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
+ }
+ pud = pud_offset(pgd, (unsigned long)__va(phys));
+ if (pud_none(*pud)) {
+ pmd = (pmd_t *) spp_getpage();
+- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
+- _PAGE_USER));
++ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
+ }
+ pmd = pmd_offset(pud, phys);
+ BUG_ON(!pmd_none(*pmd));
+@@ -848,8 +848,8 @@ int kern_addr_valid(unsigned long addr)
+ static struct vm_area_struct gate_vma = {
+ .vm_start = VSYSCALL_START,
+ .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
+- .vm_page_prot = PAGE_READONLY_EXEC,
+- .vm_flags = VM_READ | VM_EXEC
++ .vm_page_prot = PAGE_READONLY,
++ .vm_flags = VM_READ
+ };
+
+ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+@@ -883,7 +883,7 @@ int in_gate_area_no_task(unsigned long a
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
+ return "[vdso]";
+ if (vma == &gate_vma)
+ return "[vsyscall]";
+diff -urNp linux-2.6.31.7/arch/x86/mm/init.c linux-2.6.31.7/arch/x86/mm/init.c
+--- linux-2.6.31.7/arch/x86/mm/init.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/init.c 2009-12-08 17:39:42.931662815 -0500
+@@ -28,11 +28,10 @@ int direct_gbpages
+ #endif
+ ;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
+ int nx_enabled;
+
+-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+-static int disable_nx __cpuinitdata;
+-
++#ifndef CONFIG_PAX_PAGEEXEC
+ /*
+ * noexec = on|off
+ *
+@@ -46,32 +45,26 @@ static int __init noexec_setup(char *str
+ if (!str)
+ return -EINVAL;
+ if (!strncmp(str, "on", 2)) {
+- __supported_pte_mask |= _PAGE_NX;
+- disable_nx = 0;
++ nx_enabled = 1;
+ } else if (!strncmp(str, "off", 3)) {
+- disable_nx = 1;
+- __supported_pte_mask &= ~_PAGE_NX;
++ nx_enabled = 0;
+ }
+ return 0;
+ }
+ early_param("noexec", noexec_setup);
+ #endif
++#endif
+
+ #ifdef CONFIG_X86_PAE
+ static void __init set_nx(void)
+ {
+- unsigned int v[4], l, h;
+-
+- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
+- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
++ if (!nx_enabled && cpu_has_nx) {
++ unsigned l, h;
+
+- if ((v[3] & (1 << 20)) && !disable_nx) {
+- rdmsr(MSR_EFER, l, h);
+- l |= EFER_NX;
+- wrmsr(MSR_EFER, l, h);
+- nx_enabled = 1;
+- __supported_pte_mask |= _PAGE_NX;
+- }
++ __supported_pte_mask &= ~_PAGE_NX;
++ rdmsr(MSR_EFER, l, h);
++ l &= ~EFER_NX;
++ wrmsr(MSR_EFER, l, h);
+ }
+ }
+ #else
+@@ -86,7 +79,7 @@ void __cpuinit check_efer(void)
+ unsigned long efer;
+
+ rdmsrl(MSR_EFER, efer);
+- if (!(efer & EFER_NX) || disable_nx)
++ if (!(efer & EFER_NX) || !nx_enabled)
+ __supported_pte_mask &= ~_PAGE_NX;
+ }
+ #endif
+@@ -394,7 +387,13 @@ unsigned long __init_refok init_memory_m
+ */
+ int devmem_is_allowed(unsigned long pagenr)
+ {
+- if (pagenr <= 256)
++ if (!pagenr)
++ return 1;
++#ifdef CONFIG_VM86
++ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
++ return 1;
++#endif
++ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
+ return 1;
+ if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+ return 0;
+@@ -442,6 +441,83 @@ void free_init_pages(char *what, unsigne
+
+ void free_initmem(void)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++#ifdef CONFIG_X86_32
++ /* PaX: limit KERNEL_CS to actual size */
++ unsigned long addr, limit;
++ struct desc_struct d;
++ int cpu;
++
++ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++
++ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
++ }
++
++ /* PaX: make KERNEL_CS read-only */
++ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
++ if (!paravirt_enabled())
++ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
++/*
++ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++*/
++#ifdef CONFIG_X86_PAE
++ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
++ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++#endif
++
++#ifdef CONFIG_MODULES
++ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
++#endif
++
++#else
++ unsigned long addr, end;
++
++ /* PaX: make kernel code/rodata read-only, rest non-executable */
++ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ else
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++
++ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
++ end = addr + KERNEL_IMAGE_SIZE;
++ for (; addr < end; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ else
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++#endif
++
++ flush_tlb_all();
++#endif
++
+ free_init_pages("unused kernel memory",
+ (unsigned long)(&__init_begin),
+ (unsigned long)(&__init_end));
+diff -urNp linux-2.6.31.7/arch/x86/mm/iomap_32.c linux-2.6.31.7/arch/x86/mm/iomap_32.c
+--- linux-2.6.31.7/arch/x86/mm/iomap_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/iomap_32.c 2009-12-08 17:39:42.933667530 -0500
+@@ -42,7 +42,11 @@ void *kmap_atomic_prot_pfn(unsigned long
+ debug_kmap_atomic(type);
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++
++ pax_open_kernel();
+ set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++ pax_close_kernel();
++
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+diff -urNp linux-2.6.31.7/arch/x86/mm/ioremap.c linux-2.6.31.7/arch/x86/mm/ioremap.c
+--- linux-2.6.31.7/arch/x86/mm/ioremap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/ioremap.c 2009-12-08 17:39:42.934666348 -0500
+@@ -111,8 +111,8 @@ int page_is_ram(unsigned long pagenr)
+ * Second special case: Some BIOSen report the PC BIOS
+ * area (640->1Mb) as ram even though it is not.
+ */
+- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
+- pagenr < (BIOS_END >> PAGE_SHIFT))
++ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
++ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
+ return 0;
+
+ for (i = 0; i < e820.nr_map; i++) {
+@@ -207,10 +207,7 @@ static void __iomem *__ioremap_caller(re
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+- for (pfn = phys_addr >> PAGE_SHIFT;
+- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
+- pfn++) {
+-
++ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
+ int is_ram = page_is_ram(pfn);
+
+ if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
+@@ -272,6 +269,8 @@ static void __iomem *__ioremap_caller(re
+ break;
+ }
+
++ prot = canon_pgprot(prot);
++
+ /*
+ * Ok, go for it..
+ */
+@@ -489,7 +488,7 @@ static int __init early_ioremap_debug_se
+ early_param("early_ioremap_debug", early_ioremap_debug_setup);
+
+ static __initdata int after_paging_init;
+-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
++static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
+
+ static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+ {
+@@ -521,8 +520,7 @@ void __init early_ioremap_init(void)
+ slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+
+ pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+- memset(bm_pte, 0, sizeof(bm_pte));
+- pmd_populate_kernel(&init_mm, pmd, bm_pte);
++ pmd_populate_user(&init_mm, pmd, bm_pte);
+
+ /*
+ * The boot-ioremap range spans multiple pmds, for which
+diff -urNp linux-2.6.31.7/arch/x86/mm/mmap.c linux-2.6.31.7/arch/x86/mm/mmap.c
+--- linux-2.6.31.7/arch/x86/mm/mmap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/mmap.c 2009-12-08 17:39:42.934666348 -0500
+@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
+ * Leave an at least ~128 MB hole with possible stack randomization.
+ */
+ #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
+-#define MAX_GAP (TASK_SIZE/6*5)
++#define MAX_GAP (pax_task_size/6*5)
+
+ /*
+ * True on X86_32 or when emulating IA32 on X86_64
+@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
+ return rnd << PAGE_SHIFT;
+ }
+
+-static unsigned long mmap_base(void)
++static unsigned long mmap_base(struct mm_struct *mm)
+ {
+ unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
++ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
+ }
+
+ /*
+ * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
+ * does, but not when emulating X86_32
+ */
+-static unsigned long mmap_legacy_base(void)
++static unsigned long mmap_legacy_base(struct mm_struct *mm)
+ {
+- if (mmap_is_ia32())
++ if (mmap_is_ia32()) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ return SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
+ return TASK_UNMAPPED_BASE;
+- else
++ } else
+ return TASK_UNMAPPED_BASE + mmap_rnd();
+ }
+
+@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ if (mmap_is_legacy()) {
+- mm->mmap_base = mmap_legacy_base();
++ mm->mmap_base = mmap_legacy_base(mm);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+- mm->mmap_base = mmap_base();
++ mm->mmap_base = mmap_base(mm);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -urNp linux-2.6.31.7/arch/x86/mm/numa_32.c linux-2.6.31.7/arch/x86/mm/numa_32.c
+--- linux-2.6.31.7/arch/x86/mm/numa_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/numa_32.c 2009-12-08 17:39:42.935663060 -0500
+@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
+ }
+ #endif
+
+-extern unsigned long find_max_low_pfn(void);
+ extern unsigned long highend_pfn, highstart_pfn;
+
+ #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
+diff -urNp linux-2.6.31.7/arch/x86/mm/pageattr.c linux-2.6.31.7/arch/x86/mm/pageattr.c
+--- linux-2.6.31.7/arch/x86/mm/pageattr.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/pageattr.c 2009-12-08 17:39:42.936670174 -0500
+@@ -267,9 +267,10 @@ static inline pgprot_t static_protection
+ * Does not cover __inittext since that is gone later on. On
+ * 64bit we do not enforce !NX on the low mapping
+ */
+- if (within(address, (unsigned long)_text, (unsigned long)_etext))
++ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
+ pgprot_val(forbidden) |= _PAGE_NX;
+
++#ifdef CONFIG_DEBUG_RODATA
+ /*
+ * The .rodata section needs to be read-only. Using the pfn
+ * catches all aliases.
+@@ -277,6 +278,7 @@ static inline pgprot_t static_protection
+ if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
+ __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
+ pgprot_val(forbidden) |= _PAGE_RW;
++#endif
+
+ prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
+
+@@ -330,7 +332,10 @@ EXPORT_SYMBOL_GPL(lookup_address);
+ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
+ {
+ /* change init_mm */
++ pax_open_kernel();
+ set_pte_atomic(kpte, pte);
++ pax_close_kernel();
++
+ #ifdef CONFIG_X86_32
+ if (!SHARED_KERNEL_PMD) {
+ struct page *page;
+diff -urNp linux-2.6.31.7/arch/x86/mm/pageattr-test.c linux-2.6.31.7/arch/x86/mm/pageattr-test.c
+--- linux-2.6.31.7/arch/x86/mm/pageattr-test.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/pageattr-test.c 2009-12-08 17:39:42.935663060 -0500
+@@ -36,7 +36,7 @@ enum {
+
+ static int pte_testbit(pte_t pte)
+ {
+- return pte_flags(pte) & _PAGE_UNUSED1;
++ return pte_flags(pte) & _PAGE_CPA_TEST;
+ }
+
+ struct split_state {
+diff -urNp linux-2.6.31.7/arch/x86/mm/pat.c linux-2.6.31.7/arch/x86/mm/pat.c
+--- linux-2.6.31.7/arch/x86/mm/pat.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/pat.c 2009-12-08 17:39:42.936670174 -0500
+@@ -213,7 +213,7 @@ chk_conflict(struct memtype *new, struct
+
+ conflict:
+ printk(KERN_INFO "%s:%d conflicting memory types "
+- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
++ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
+ new->end, cattr_name(new->type), cattr_name(entry->type));
+ return -EBUSY;
+ }
+@@ -487,7 +487,7 @@ int free_memtype(u64 start, u64 end)
+
+ if (err) {
+ printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
+- current->comm, current->pid, start, end);
++ current->comm, task_pid_nr(current), start, end);
+ }
+
+ dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
+@@ -588,7 +588,7 @@ int kernel_map_sync_memtype(u64 base, un
+ printk(KERN_INFO
+ "%s:%d ioremap_change_attr failed %s "
+ "for %Lx-%Lx\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(flags),
+ base, (unsigned long long)(base + size));
+ return -EINVAL;
+@@ -628,7 +628,7 @@ static int reserve_pfn_range(u64 paddr,
+ free_memtype(paddr, paddr + size);
+ printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+ " for %Lx-%Lx, got %s\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(want_flags),
+ (unsigned long long)paddr,
+ (unsigned long long)(paddr + size),
+@@ -827,7 +827,7 @@ static int memtype_seq_show(struct seq_f
+ return 0;
+ }
+
+-static struct seq_operations memtype_seq_ops = {
++static const struct seq_operations memtype_seq_ops = {
+ .start = memtype_seq_start,
+ .next = memtype_seq_next,
+ .stop = memtype_seq_stop,
+diff -urNp linux-2.6.31.7/arch/x86/mm/pgtable_32.c linux-2.6.31.7/arch/x86/mm/pgtable_32.c
+--- linux-2.6.31.7/arch/x86/mm/pgtable_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/pgtable_32.c 2009-12-08 17:39:42.937667368 -0500
+@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
+ return;
+ }
+ pte = pte_offset_kernel(pmd, vaddr);
++
++ pax_open_kernel();
+ if (pte_val(pteval))
+ set_pte_at(&init_mm, vaddr, pte, pteval);
+ else
+ pte_clear(&init_mm, vaddr, pte);
++ pax_close_kernel();
+
+ /*
+ * It's enough to flush this one mapping.
+diff -urNp linux-2.6.31.7/arch/x86/mm/tlb.c linux-2.6.31.7/arch/x86/mm/tlb.c
+--- linux-2.6.31.7/arch/x86/mm/tlb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/mm/tlb.c 2009-12-08 17:39:42.937667368 -0500
+@@ -12,7 +12,7 @@
+ #include <asm/uv/uv.h>
+
+ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
+- = { &init_mm, 0, };
++ = { &init_mm, 0 };
+
+ /*
+ * Smarter SMP flushing macros.
+diff -urNp linux-2.6.31.7/arch/x86/oprofile/backtrace.c linux-2.6.31.7/arch/x86/oprofile/backtrace.c
+--- linux-2.6.31.7/arch/x86/oprofile/backtrace.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/oprofile/backtrace.c 2009-12-08 17:39:42.937667368 -0500
+@@ -37,7 +37,7 @@ static void backtrace_address(void *data
+ unsigned int *depth = data;
+
+ if ((*depth)--)
+- oprofile_add_trace(addr);
++ oprofile_add_trace(ktla_ktva(addr));
+ }
+
+ static struct stacktrace_ops backtrace_ops = {
+@@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
+ struct frame_head bufhead[2];
+
+ /* Also check accessibility of one struct frame_head beyond */
+- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
++ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
+ return NULL;
+ if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
+ return NULL;
+@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
+ {
+ struct frame_head *head = (struct frame_head *)frame_pointer(regs);
+
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned long stack = kernel_stack_pointer(regs);
+ if (depth)
+ dump_trace(NULL, regs, (unsigned long *)stack, 0,
+diff -urNp linux-2.6.31.7/arch/x86/oprofile/op_model_p4.c linux-2.6.31.7/arch/x86/oprofile/op_model_p4.c
+--- linux-2.6.31.7/arch/x86/oprofile/op_model_p4.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/oprofile/op_model_p4.c 2009-12-08 17:39:42.938668486 -0500
+@@ -48,7 +48,7 @@ static inline void setup_num_counters(vo
+ #endif
+ }
+
+-static int inline addr_increment(void)
++static inline int addr_increment(void)
+ {
+ #ifdef CONFIG_SMP
+ return smp_num_siblings == 2 ? 2 : 1;
+diff -urNp linux-2.6.31.7/arch/x86/pci/common.c linux-2.6.31.7/arch/x86/pci/common.c
+--- linux-2.6.31.7/arch/x86/pci/common.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/pci/common.c 2009-12-08 17:39:42.938668486 -0500
+@@ -31,8 +31,8 @@ int noioapicreroute = 1;
+ int pcibios_last_bus = -1;
+ unsigned long pirq_table_addr;
+ struct pci_bus *pci_root_bus;
+-struct pci_raw_ops *raw_pci_ops;
+-struct pci_raw_ops *raw_pci_ext_ops;
++const struct pci_raw_ops *raw_pci_ops;
++const struct pci_raw_ops *raw_pci_ext_ops;
+
+ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 *val)
+@@ -370,7 +370,7 @@ static const struct dmi_system_id __devi
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
+ },
+ },
+- {}
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}
+ };
+
+ void __init dmi_check_pciprobe(void)
+diff -urNp linux-2.6.31.7/arch/x86/pci/direct.c linux-2.6.31.7/arch/x86/pci/direct.c
+--- linux-2.6.31.7/arch/x86/pci/direct.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/pci/direct.c 2009-12-08 17:39:42.938668486 -0500
+@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
+
+ #undef PCI_CONF1_ADDRESS
+
+-struct pci_raw_ops pci_direct_conf1 = {
++const struct pci_raw_ops pci_direct_conf1 = {
+ .read = pci_conf1_read,
+ .write = pci_conf1_write,
+ };
+@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
+
+ #undef PCI_CONF2_ADDRESS
+
+-struct pci_raw_ops pci_direct_conf2 = {
++const struct pci_raw_ops pci_direct_conf2 = {
+ .read = pci_conf2_read,
+ .write = pci_conf2_write,
+ };
+@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
+ * This should be close to trivial, but it isn't, because there are buggy
+ * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
+ */
+-static int __init pci_sanity_check(struct pci_raw_ops *o)
++static int __init pci_sanity_check(const struct pci_raw_ops *o)
+ {
+ u32 x = 0;
+ int devfn;
+diff -urNp linux-2.6.31.7/arch/x86/pci/fixup.c linux-2.6.31.7/arch/x86/pci/fixup.c
+--- linux-2.6.31.7/arch/x86/pci/fixup.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/pci/fixup.c 2009-12-08 17:39:42.939664190 -0500
+@@ -364,7 +364,7 @@ static const struct dmi_system_id __devi
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"),
+ },
+ },
+- {}
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+
+ /*
+@@ -435,7 +435,7 @@ static const struct dmi_system_id __devi
+ DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+
+ static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
+diff -urNp linux-2.6.31.7/arch/x86/pci/i386.c linux-2.6.31.7/arch/x86/pci/i386.c
+--- linux-2.6.31.7/arch/x86/pci/i386.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/pci/i386.c 2009-12-08 17:39:42.939664190 -0500
+@@ -266,7 +266,7 @@ void pcibios_set_master(struct pci_dev *
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
+ }
+
+-static struct vm_operations_struct pci_mmap_ops = {
++static const struct vm_operations_struct pci_mmap_ops = {
+ .access = generic_access_phys,
+ };
+
+diff -urNp linux-2.6.31.7/arch/x86/pci/irq.c linux-2.6.31.7/arch/x86/pci/irq.c
+--- linux-2.6.31.7/arch/x86/pci/irq.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/pci/irq.c 2009-12-08 17:39:42.940661170 -0500
+@@ -543,7 +543,7 @@ static __init int intel_router_probe(str
+ static struct pci_device_id __initdata pirq_440gx[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
+- { },
++ { PCI_DEVICE(0, 0) }
+ };
+
+ /* 440GX has a proprietary PIRQ router -- don't use it */
+@@ -1107,7 +1107,7 @@ static struct dmi_system_id __initdata p
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+
+ int __init pcibios_irq_init(void)
+diff -urNp linux-2.6.31.7/arch/x86/pci/mmconfig_32.c linux-2.6.31.7/arch/x86/pci/mmconfig_32.c
+--- linux-2.6.31.7/arch/x86/pci/mmconfig_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/pci/mmconfig_32.c 2009-12-08 17:39:42.940661170 -0500
+@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
+ return 0;
+ }
+
+-static struct pci_raw_ops pci_mmcfg = {
++static const struct pci_raw_ops pci_mmcfg = {
+ .read = pci_mmcfg_read,
+ .write = pci_mmcfg_write,
+ };
+diff -urNp linux-2.6.31.7/arch/x86/pci/mmconfig_64.c linux-2.6.31.7/arch/x86/pci/mmconfig_64.c
+--- linux-2.6.31.7/arch/x86/pci/mmconfig_64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/pci/mmconfig_64.c 2009-12-08 17:39:42.940661170 -0500
+@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
+ return 0;
+ }
+
+-static struct pci_raw_ops pci_mmcfg = {
++static const struct pci_raw_ops pci_mmcfg = {
+ .read = pci_mmcfg_read,
+ .write = pci_mmcfg_write,
+ };
+diff -urNp linux-2.6.31.7/arch/x86/pci/numaq_32.c linux-2.6.31.7/arch/x86/pci/numaq_32.c
+--- linux-2.6.31.7/arch/x86/pci/numaq_32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/pci/numaq_32.c 2009-12-08 17:39:42.941671710 -0500
+@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
+
+ #undef PCI_CONF1_MQ_ADDRESS
+
+-static struct pci_raw_ops pci_direct_conf1_mq = {
++static const struct pci_raw_ops pci_direct_conf1_mq = {
+ .read = pci_conf1_mq_read,
+ .write = pci_conf1_mq_write
+ };
+diff -urNp linux-2.6.31.7/arch/x86/pci/olpc.c linux-2.6.31.7/arch/x86/pci/olpc.c
+--- linux-2.6.31.7/arch/x86/pci/olpc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/pci/olpc.c 2009-12-08 17:39:42.941671710 -0500
+@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
+ return 0;
+ }
+
+-static struct pci_raw_ops pci_olpc_conf = {
++static const struct pci_raw_ops pci_olpc_conf = {
+ .read = pci_olpc_read,
+ .write = pci_olpc_write,
+ };
+diff -urNp linux-2.6.31.7/arch/x86/pci/pcbios.c linux-2.6.31.7/arch/x86/pci/pcbios.c
+--- linux-2.6.31.7/arch/x86/pci/pcbios.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/pci/pcbios.c 2009-12-08 17:39:42.941671710 -0500
+@@ -56,50 +56,93 @@ union bios32 {
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} bios32_indirect = { 0, __KERNEL_CS };
++} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
+
+ /*
+ * Returns the entry point for the given service, NULL on error
+ */
+
+-static unsigned long bios32_service(unsigned long service)
++static unsigned long __devinit bios32_service(unsigned long service)
+ {
+ unsigned char return_code; /* %al */
+ unsigned long address; /* %ebx */
+ unsigned long length; /* %ecx */
+ unsigned long entry; /* %edx */
+ unsigned long flags;
++ struct desc_struct d, *gdt;
+
+ local_irq_save(flags);
+- __asm__("lcall *(%%edi); cld"
++
++ gdt = get_cpu_gdt_table(smp_processor_id());
++
++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++
++ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
+ : "=a" (return_code),
+ "=b" (address),
+ "=c" (length),
+ "=d" (entry)
+ : "0" (service),
+ "1" (0),
+- "D" (&bios32_indirect));
++ "D" (&bios32_indirect),
++ "r"(__PCIBIOS_DS)
++ : "memory");
++
++ pax_open_kernel();
++ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
++ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
++ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
++ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
++ pax_close_kernel();
++
+ local_irq_restore(flags);
+
+ switch (return_code) {
+- case 0:
+- return address + entry;
+- case 0x80: /* Not present */
+- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
+- return 0;
+- default: /* Shouldn't happen */
+- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
+- service, return_code);
++ case 0: {
++ int cpu;
++ unsigned char flags;
++
++ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
++ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
++ printk(KERN_WARNING "bios32_service: not valid\n");
+ return 0;
++ }
++ address = address + PAGE_OFFSET;
++ length += 16UL; /* some BIOSs underreport this... */
++ flags = 4;
++ if (length >= 64*1024*1024) {
++ length >>= PAGE_SHIFT;
++ flags |= 8;
++ }
++
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ gdt = get_cpu_gdt_table(cpu);
++ pack_descriptor(&d, address, length, 0x9b, flags);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, address, length, 0x93, flags);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++ }
++ return entry;
++ }
++ case 0x80: /* Not present */
++ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
++ return 0;
++ default: /* Shouldn't happen */
++ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
++ service, return_code);
++ return 0;
+ }
+ }
+
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} pci_indirect = { 0, __KERNEL_CS };
++} pci_indirect __read_only = { 0, __PCIBIOS_CS };
+
+-static int pci_bios_present;
++static int pci_bios_present __read_only;
+
+ static int __devinit check_pcibios(void)
+ {
+@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
+ unsigned long flags, pcibios_entry;
+
+ if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
+- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
++ pci_indirect.address = pcibios_entry;
+
+ local_irq_save(flags);
+- __asm__(
+- "lcall *(%%edi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%edi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
+ "=b" (ebx),
+ "=c" (ecx)
+ : "1" (PCIBIOS_PCI_BIOS_PRESENT),
+- "D" (&pci_indirect)
++ "D" (&pci_indirect),
++ "r" (__PCIBIOS_DS)
+ : "memory");
+ local_irq_restore(flags);
+
+@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
+
+ switch (len) {
+ case 1:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
+ : "1" (PCIBIOS_READ_CONFIG_BYTE),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ /*
+ * Zero-extend the result beyond 8 bits, do not trust the
+ * BIOS having done it:
+@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
+ *value &= 0xff;
+ break;
+ case 2:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
+ : "1" (PCIBIOS_READ_CONFIG_WORD),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ /*
+ * Zero-extend the result beyond 16 bits, do not trust the
+ * BIOS having done it:
+@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
+ *value &= 0xffff;
+ break;
+ case 4:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
+ : "1" (PCIBIOS_READ_CONFIG_DWORD),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ }
+
+@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
+
+ switch (len) {
+ case 1:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ case 2:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ case 4:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ }
+
+@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
+ * Function table for BIOS32 access
+ */
+
+-static struct pci_raw_ops pci_bios_access = {
++static const struct pci_raw_ops pci_bios_access = {
+ .read = pci_bios_read,
+ .write = pci_bios_write
+ };
+@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
+ * Try to find PCI BIOS.
+ */
+
+-static struct pci_raw_ops * __devinit pci_find_bios(void)
++static const struct pci_raw_ops * __devinit pci_find_bios(void)
+ {
+ union bios32 *check;
+ unsigned char sum;
+@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
+
+ DBG("PCI: Fetching IRQ routing table... ");
+ __asm__("push %%es\n\t"
++ "movw %w8, %%ds\n\t"
+ "push %%ds\n\t"
+ "pop %%es\n\t"
+- "lcall *(%%esi); cld\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
+ "pop %%es\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
+ "1" (0),
+ "D" ((long) &opt),
+ "S" (&pci_indirect),
+- "m" (opt)
++ "m" (opt),
++ "r" (__PCIBIOS_DS)
+ : "memory");
+ DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
+ if (ret & 0xff00)
+@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
+ {
+ int ret;
+
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w5, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
+ : "0" (PCIBIOS_SET_PCI_HW_INT),
+ "b" ((dev->bus->number << 8) | dev->devfn),
+ "c" ((irq << 8) | (pin + 10)),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ return !(ret & 0xff00);
+ }
+ EXPORT_SYMBOL(pcibios_set_irq_routing);
+diff -urNp linux-2.6.31.7/arch/x86/power/cpu.c linux-2.6.31.7/arch/x86/power/cpu.c
+--- linux-2.6.31.7/arch/x86/power/cpu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/power/cpu.c 2009-12-08 17:39:42.942666186 -0500
+@@ -126,7 +126,7 @@ static void do_fpu_end(void)
+ static void fix_processor_context(void)
+ {
+ int cpu = smp_processor_id();
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+
+ set_tss_desc(cpu, t); /*
+ * This just modifies memory; should not be
+@@ -136,7 +136,9 @@ static void fix_processor_context(void)
+ */
+
+ #ifdef CONFIG_X86_64
++ pax_open_kernel();
+ get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
++ pax_close_kernel();
+
+ syscall_init(); /* This sets MSR_*STAR and related */
+ #endif
+diff -urNp linux-2.6.31.7/arch/x86/vdso/Makefile linux-2.6.31.7/arch/x86/vdso/Makefile
+--- linux-2.6.31.7/arch/x86/vdso/Makefile 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/vdso/Makefile 2009-12-08 17:39:42.942666186 -0500
+@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
+ $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
+ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
+
+-VDSO_LDFLAGS = -fPIC -shared $(call ld-option, -Wl$(comma)--hash-style=sysv)
++VDSO_LDFLAGS = -fPIC -shared --no-undefined $(call ld-option, -Wl$(comma)--hash-style=sysv)
+ GCOV_PROFILE := n
+
+ #
+diff -urNp linux-2.6.31.7/arch/x86/vdso/vclock_gettime.c linux-2.6.31.7/arch/x86/vdso/vclock_gettime.c
+--- linux-2.6.31.7/arch/x86/vdso/vclock_gettime.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/vdso/vclock_gettime.c 2009-12-08 17:39:42.943663292 -0500
+@@ -22,24 +22,48 @@
+ #include <asm/hpet.h>
+ #include <asm/unistd.h>
+ #include <asm/io.h>
++#include <asm/fixmap.h>
+ #include "vextern.h"
+
+ #define gtod vdso_vsyscall_gtod_data
+
++notrace noinline long __vdso_fallback_time(long *t)
++{
++ long secs;
++ asm volatile("syscall"
++ : "=a" (secs)
++ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
++ return secs;
++}
++
+ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+ {
+ long ret;
+ asm("syscall" : "=a" (ret) :
+- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
++ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
+ return ret;
+ }
+
++notrace static inline cycle_t __vdso_vread_hpet(void)
++{
++ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
++}
++
++notrace static inline cycle_t __vdso_vread_tsc(void)
++{
++ cycle_t ret = (cycle_t)vget_cycles();
++
++ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
++}
++
+ notrace static inline long vgetns(void)
+ {
+ long v;
+- cycles_t (*vread)(void);
+- vread = gtod->clock.vread;
+- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
++ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
++ v = __vdso_vread_tsc();
++ else
++ v = __vdso_vread_hpet();
++ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
+ return (v * gtod->clock.mult) >> gtod->clock.shift;
+ }
+
+@@ -88,7 +112,9 @@ notrace static noinline int do_monotonic
+
+ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+ {
+- if (likely(gtod->sysctl_enabled && gtod->clock.vread))
++ if (likely(gtod->sysctl_enabled &&
++ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
++ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
+ switch (clock) {
+ case CLOCK_REALTIME:
+ return do_realtime(ts);
+@@ -100,10 +126,20 @@ notrace int __vdso_clock_gettime(clockid
+ int clock_gettime(clockid_t, struct timespec *)
+ __attribute__((weak, alias("__vdso_clock_gettime")));
+
+-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
++notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
+ {
+ long ret;
+- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
++ asm("syscall" : "=a" (ret) :
++ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
++ return ret;
++}
++
++notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
++{
++ if (likely(gtod->sysctl_enabled &&
++ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
++ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
++ {
+ if (likely(tv != NULL)) {
+ BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
+ offsetof(struct timespec, tv_nsec) ||
+@@ -118,9 +154,7 @@ notrace int __vdso_gettimeofday(struct t
+ }
+ return 0;
+ }
+- asm("syscall" : "=a" (ret) :
+- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+- return ret;
++ return __vdso_fallback_gettimeofday(tv, tz);
+ }
+ int gettimeofday(struct timeval *, struct timezone *)
+ __attribute__((weak, alias("__vdso_gettimeofday")));
+diff -urNp linux-2.6.31.7/arch/x86/vdso/vdso32-setup.c linux-2.6.31.7/arch/x86/vdso/vdso32-setup.c
+--- linux-2.6.31.7/arch/x86/vdso/vdso32-setup.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/vdso/vdso32-setup.c 2009-12-08 17:39:42.944663877 -0500
+@@ -25,6 +25,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/vdso.h>
+ #include <asm/proto.h>
++#include <asm/mman.h>
+
+ enum {
+ VDSO_DISABLED = 0,
+@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
+ void enable_sep_cpu(void)
+ {
+ int cpu = get_cpu();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+
+ if (!boot_cpu_has(X86_FEATURE_SEP)) {
+ put_cpu();
+@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+- gate_vma.vm_page_prot = __P101;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+ /*
+ * Make sure the vDSO gets into every core dump.
+ * Dumping its contents makes post-mortem fully interpretable later
+@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
+ if (compat)
+ addr = VDSO_HIGH_BASE;
+ else {
+- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
++ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+ }
+
+- current->mm->context.vdso = (void *)addr;
++ current->mm->context.vdso = addr;
+
+ if (compat_uses_vma || !compat) {
+ /*
+@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
+ }
+
+ current_thread_info()->sysenter_return =
+- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
++ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
+
+ up_fail:
+ if (ret)
+- current->mm->context.vdso = NULL;
++ current->mm->context.vdso = 0;
+
+ up_write(&mm->mmap_sem);
+
+@@ -388,7 +389,7 @@ static ctl_table abi_table2[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+- {}
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+
+ static ctl_table abi_root_table2[] = {
+@@ -398,7 +399,7 @@ static ctl_table abi_root_table2[] = {
+ .mode = 0555,
+ .child = abi_table2
+ },
+- {}
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+
+ static __init int ia32_binfmt_init(void)
+@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
+ return "[vdso]";
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
++ return "[vdso]";
++#endif
++
+ return NULL;
+ }
+
+@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
+ struct mm_struct *mm = tsk->mm;
+
+ /* Check to see if this task was created in compat vdso mode */
+- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
++ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
+ return &gate_vma;
+ return NULL;
+ }
+diff -urNp linux-2.6.31.7/arch/x86/vdso/vdso.lds.S linux-2.6.31.7/arch/x86/vdso/vdso.lds.S
+--- linux-2.6.31.7/arch/x86/vdso/vdso.lds.S 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/vdso/vdso.lds.S 2009-12-08 17:39:42.943663292 -0500
+@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
+ #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
+ #include "vextern.h"
+ #undef VEXTERN
++
++#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
++VEXTERN(fallback_gettimeofday)
++VEXTERN(fallback_time)
++VEXTERN(getcpu)
++#undef VEXTERN
+diff -urNp linux-2.6.31.7/arch/x86/vdso/vextern.h linux-2.6.31.7/arch/x86/vdso/vextern.h
+--- linux-2.6.31.7/arch/x86/vdso/vextern.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/vdso/vextern.h 2009-12-08 17:39:42.944663877 -0500
+@@ -11,6 +11,5 @@
+ put into vextern.h and be referenced as a pointer with vdso prefix.
+ The main kernel later fills in the values. */
+
+-VEXTERN(jiffies)
+ VEXTERN(vgetcpu_mode)
+ VEXTERN(vsyscall_gtod_data)
+diff -urNp linux-2.6.31.7/arch/x86/vdso/vma.c linux-2.6.31.7/arch/x86/vdso/vma.c
+--- linux-2.6.31.7/arch/x86/vdso/vma.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/vdso/vma.c 2009-12-08 17:39:42.944663877 -0500
+@@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
+ if (!vbase)
+ goto oom;
+
+- if (memcmp(vbase, "\177ELF", 4)) {
++ if (memcmp(vbase, ELFMAG, SELFMAG)) {
+ printk("VDSO: I'm broken; not ELF\n");
+ vdso_enabled = 0;
+ }
+@@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
+ *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
+ #include "vextern.h"
+ #undef VEXTERN
++ vunmap(vbase);
+ return 0;
+
+ oom:
+@@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
+ goto up_fail;
+ }
+
+- current->mm->context.vdso = (void *)addr;
++ current->mm->context.vdso = addr;
+
+ ret = install_special_mapping(mm, addr, vdso_size,
+ VM_READ|VM_EXEC|
+@@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
+ VM_ALWAYSDUMP,
+ vdso_pages);
+ if (ret) {
+- current->mm->context.vdso = NULL;
++ current->mm->context.vdso = 0;
+ goto up_fail;
+ }
+
+@@ -132,10 +133,3 @@ up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
+ }
+-
+-static __init int vdso_setup(char *s)
+-{
+- vdso_enabled = simple_strtoul(s, NULL, 0);
+- return 0;
+-}
+-__setup("vdso=", vdso_setup);
+diff -urNp linux-2.6.31.7/arch/x86/xen/debugfs.c linux-2.6.31.7/arch/x86/xen/debugfs.c
+--- linux-2.6.31.7/arch/x86/xen/debugfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/xen/debugfs.c 2009-12-08 17:39:42.945674723 -0500
+@@ -100,7 +100,7 @@ static int xen_array_release(struct inod
+ return 0;
+ }
+
+-static struct file_operations u32_array_fops = {
++static const struct file_operations u32_array_fops = {
+ .owner = THIS_MODULE,
+ .open = u32_array_open,
+ .release= xen_array_release,
+diff -urNp linux-2.6.31.7/arch/x86/xen/enlighten.c linux-2.6.31.7/arch/x86/xen/enlighten.c
+--- linux-2.6.31.7/arch/x86/xen/enlighten.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/xen/enlighten.c 2009-12-08 17:39:42.945674723 -0500
+@@ -70,8 +70,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
+
+ struct shared_info xen_dummy_shared_info;
+
+-void *xen_initial_gdt;
+-
+ /*
+ * Point at some empty memory to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
+
+ preempt_disable();
+
+- start = __get_cpu_var(idt_desc).address;
++ start = (unsigned long)__get_cpu_var(idt_desc).address;
+ end = start + __get_cpu_var(idt_desc).size + 1;
+
+ xen_mc_flush();
+@@ -1126,13 +1124,6 @@ asmlinkage void __init xen_start_kernel(
+
+ machine_ops = xen_machine_ops;
+
+- /*
+- * The only reliable way to retain the initial address of the
+- * percpu gdt_page is to remember it here, so we can go and
+- * mark it RW later, when the initial percpu area is freed.
+- */
+- xen_initial_gdt = &per_cpu(gdt_page, 0);
+-
+ xen_smp_init();
+
+ pgd = (pgd_t *)xen_start_info->pt_base;
+diff -urNp linux-2.6.31.7/arch/x86/xen/mmu.c linux-2.6.31.7/arch/x86/xen/mmu.c
+--- linux-2.6.31.7/arch/x86/xen/mmu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/xen/mmu.c 2009-12-08 17:39:42.946666694 -0500
+@@ -1707,6 +1707,8 @@ __init pgd_t *xen_setup_kernel_pagetable
+ convert_pfn_mfn(init_level4_pgt);
+ convert_pfn_mfn(level3_ident_pgt);
+ convert_pfn_mfn(level3_kernel_pgt);
++ convert_pfn_mfn(level3_vmalloc_pgt);
++ convert_pfn_mfn(level3_vmemmap_pgt);
+
+ l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
+ l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
+@@ -1725,7 +1727,10 @@ __init pgd_t *xen_setup_kernel_pagetable
+ set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
++ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+
+diff -urNp linux-2.6.31.7/arch/x86/xen/smp.c linux-2.6.31.7/arch/x86/xen/smp.c
+--- linux-2.6.31.7/arch/x86/xen/smp.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/xen/smp.c 2009-12-08 17:39:42.946666694 -0500
+@@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
+ {
+ BUG_ON(smp_processor_id() != 0);
+ native_smp_prepare_boot_cpu();
+-
+- /* We've switched to the "real" per-cpu gdt, so make sure the
+- old memory can be recycled */
+- make_lowmem_page_readwrite(xen_initial_gdt);
+-
+ xen_setup_vcpu_info_placement();
+ }
+
+@@ -231,8 +226,8 @@ cpu_initialize_context(unsigned int cpu,
+ gdt = get_cpu_gdt_table(cpu);
+
+ ctxt->flags = VGCF_IN_KERNEL;
+- ctxt->user_regs.ds = __USER_DS;
+- ctxt->user_regs.es = __USER_DS;
++ ctxt->user_regs.ds = __KERNEL_DS;
++ ctxt->user_regs.es = __KERNEL_DS;
+ ctxt->user_regs.ss = __KERNEL_DS;
+ #ifdef CONFIG_X86_32
+ ctxt->user_regs.fs = __KERNEL_PERCPU;
+diff -urNp linux-2.6.31.7/arch/x86/xen/xen-ops.h linux-2.6.31.7/arch/x86/xen/xen-ops.h
+--- linux-2.6.31.7/arch/x86/xen/xen-ops.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/x86/xen/xen-ops.h 2009-12-08 17:39:42.946666694 -0500
+@@ -10,8 +10,6 @@
+ extern const char xen_hypervisor_callback[];
+ extern const char xen_failsafe_callback[];
+
+-extern void *xen_initial_gdt;
+-
+ struct trap_info;
+ void xen_copy_trap_info(struct trap_info *traps);
+
+diff -urNp linux-2.6.31.7/arch/xtensa/include/asm/atomic.h linux-2.6.31.7/arch/xtensa/include/asm/atomic.h
+--- linux-2.6.31.7/arch/xtensa/include/asm/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/arch/xtensa/include/asm/atomic.h 2009-12-08 17:39:42.946666694 -0500
+@@ -49,6 +49,14 @@
+ #define atomic_read(v) ((v)->counter)
+
+ /**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++#define atomic_read_unchecked(v) ((v)->counter)
++
++/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+@@ -58,6 +66,15 @@
+ #define atomic_set(v,i) ((v)->counter = (i))
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++#define atomic_set_unchecked(v,i) ((v)->counter = (i))
++
++/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -81,6 +98,11 @@ static inline void atomic_add(int i, ato
+ );
+ }
+
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t * v)
++{
++ atomic_add(i, (atomic_t *)v);
++}
++
+ /**
+ * atomic_sub - subtract the atomic variable
+ * @i: integer value to subtract
+@@ -105,6 +127,11 @@ static inline void atomic_sub(int i, ato
+ );
+ }
+
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_sub(i, (atomic_t *)v);
++}
++
+ /*
+ * We use atomic_{add|sub}_return to define other functions.
+ */
+@@ -165,6 +192,7 @@ static inline int atomic_sub_return(int
+ * Atomically increments @v by 1.
+ */
+ #define atomic_inc(v) atomic_add(1,(v))
++#define atomic_inc_unchecked(v) atomic_add_unchecked(1,(v))
+
+ /**
+ * atomic_inc - increment atomic variable
+diff -urNp linux-2.6.31.7/block/blk-integrity.c linux-2.6.31.7/block/blk-integrity.c
+--- linux-2.6.31.7/block/blk-integrity.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/block/blk-integrity.c 2009-12-08 17:39:42.947662655 -0500
+@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
+ NULL,
+ };
+
+-static struct sysfs_ops integrity_ops = {
++static const struct sysfs_ops integrity_ops = {
+ .show = &integrity_attr_show,
+ .store = &integrity_attr_store,
+ };
+diff -urNp linux-2.6.31.7/block/blk-map.c linux-2.6.31.7/block/blk-map.c
+--- linux-2.6.31.7/block/blk-map.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/block/blk-map.c 2009-12-08 17:39:42.947662655 -0500
+@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
+ * direct dma. else, set up kernel bounce buffers
+ */
+ uaddr = (unsigned long) ubuf;
+- if (blk_rq_aligned(q, ubuf, len) && !map_data)
++ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
+ bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
+ else
+ bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
+diff -urNp linux-2.6.31.7/block/blk-sysfs.c linux-2.6.31.7/block/blk-sysfs.c
+--- linux-2.6.31.7/block/blk-sysfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/block/blk-sysfs.c 2009-12-08 17:39:42.947662655 -0500
+@@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
+ kmem_cache_free(blk_requestq_cachep, q);
+ }
+
+-static struct sysfs_ops queue_sysfs_ops = {
++static const struct sysfs_ops queue_sysfs_ops = {
+ .show = queue_attr_show,
+ .store = queue_attr_store,
+ };
+diff -urNp linux-2.6.31.7/block/elevator.c linux-2.6.31.7/block/elevator.c
+--- linux-2.6.31.7/block/elevator.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/block/elevator.c 2009-12-08 17:39:42.952669663 -0500
+@@ -901,7 +901,7 @@ elv_attr_store(struct kobject *kobj, str
+ return error;
+ }
+
+-static struct sysfs_ops elv_sysfs_ops = {
++static const struct sysfs_ops elv_sysfs_ops = {
+ .show = elv_attr_show,
+ .store = elv_attr_store,
+ };
+diff -urNp linux-2.6.31.7/crypto/lrw.c linux-2.6.31.7/crypto/lrw.c
+--- linux-2.6.31.7/crypto/lrw.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/crypto/lrw.c 2009-12-08 17:39:42.952669663 -0500
+@@ -60,7 +60,7 @@ static int setkey(struct crypto_tfm *par
+ struct priv *ctx = crypto_tfm_ctx(parent);
+ struct crypto_cipher *child = ctx->child;
+ int err, i;
+- be128 tmp = { 0 };
++ be128 tmp = { 0, 0 };
+ int bsize = crypto_cipher_blocksize(child);
+
+ crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+diff -urNp linux-2.6.31.7/Documentation/dontdiff linux-2.6.31.7/Documentation/dontdiff
+--- linux-2.6.31.7/Documentation/dontdiff 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/Documentation/dontdiff 2009-12-08 17:39:42.710705311 -0500
+@@ -3,6 +3,7 @@
+ *.bin
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+@@ -49,11 +50,16 @@
+ 53c700_d.h
+ CVS
+ ChangeSet
++GPATH
++GRTAGS
++GSYMS
++GTAGS
+ Image
+ Kerntypes
+ Module.markers
+ Module.symvers
+ PENDING
++PERF*
+ SCCS
+ System.map*
+ TAGS
+@@ -76,7 +82,9 @@ btfixupprep
+ build
+ bvmlinux
+ bzImage*
++capflags.c
+ classlist.h*
++common-cmds.h
+ comp*.log
+ compile.h*
+ conf
+@@ -103,13 +111,14 @@ gen_crc32table
+ gen_init_cpio
+ genksyms
+ *_gray256.c
++hash
+ ihex2fw
+ ikconfig.h*
+ initramfs_data.cpio
++initramfs_data.cpio.bz2
+ initramfs_data.cpio.gz
+ initramfs_list
+ kallsyms
+-kconfig
+ keywords.c
+ ksym.c*
+ ksym.h*
+@@ -133,6 +142,7 @@ mkboot
+ mkbugboot
+ mkcpustr
+ mkdep
++mkpiggy
+ mkprep
+ mktables
+ mktree
+@@ -149,6 +159,7 @@ patches*
+ pca200e.bin
+ pca200e_ecd.bin2
+ piggy.gz
++piggy.S
+ piggyback
+ pnmtologo
+ ppc_defs.h*
+@@ -163,6 +174,7 @@ setup
+ setup.bin
+ setup.elf
+ sImage
++slabinfo
+ sm_tbl*
+ split-include
+ syscalltab.h
+@@ -186,14 +198,20 @@ version.h*
+ vmlinux
+ vmlinux-*
+ vmlinux.aout
++vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
++voffset.h
+ vsyscall.lds
+ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
+ zImage*
+ zconf.hash.c
++zoffset.h
+diff -urNp linux-2.6.31.7/Documentation/kernel-parameters.txt linux-2.6.31.7/Documentation/kernel-parameters.txt
+--- linux-2.6.31.7/Documentation/kernel-parameters.txt 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/Documentation/kernel-parameters.txt 2009-12-08 17:39:42.711638398 -0500
+@@ -1776,6 +1776,12 @@ and is between 256 and 4096 characters.
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++ pax_nouderef [X86-32] disables UDEREF. Most likely needed under certain
++ virtualization environments that don't cope well with the
++ expand down segment used by UDEREF on X86-32.
++
++ pax_softmode= [X86-32] 0/1 to disable/enable PaX softmode on boot already.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
+diff -urNp linux-2.6.31.7/drivers/acpi/blacklist.c linux-2.6.31.7/drivers/acpi/blacklist.c
+--- linux-2.6.31.7/drivers/acpi/blacklist.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/acpi/blacklist.c 2009-12-08 17:39:42.953865512 -0500
+@@ -71,7 +71,7 @@ static struct acpi_blacklist_item acpi_b
+ {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
+ "Incorrect _ADR", 1},
+
+- {""}
++ {"", "", 0, NULL, all_versions, NULL, 0}
+ };
+
+ #if CONFIG_ACPI_BLACKLIST_YEAR
+diff -urNp linux-2.6.31.7/drivers/acpi/dock.c linux-2.6.31.7/drivers/acpi/dock.c
+--- linux-2.6.31.7/drivers/acpi/dock.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/acpi/dock.c 2009-12-08 17:39:42.964666169 -0500
+@@ -75,7 +75,7 @@ struct dock_dependent_device {
+ struct list_head list;
+ struct list_head hotplug_list;
+ acpi_handle handle;
+- struct acpi_dock_ops *ops;
++ const struct acpi_dock_ops *ops;
+ void *context;
+ };
+
+@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
+ * the dock driver after _DCK is executed.
+ */
+ int
+-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
++register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
+ void *context)
+ {
+ struct dock_dependent_device *dd;
+diff -urNp linux-2.6.31.7/drivers/acpi/osl.c linux-2.6.31.7/drivers/acpi/osl.c
+--- linux-2.6.31.7/drivers/acpi/osl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/acpi/osl.c 2009-12-08 17:39:42.964666169 -0500
+@@ -521,6 +521,8 @@ acpi_os_read_memory(acpi_physical_addres
+ void __iomem *virt_addr;
+
+ virt_addr = ioremap(phys_addr, width);
++ if (!virt_addr)
++ return AE_NO_MEMORY;
+ if (!value)
+ value = &dummy;
+
+@@ -549,6 +551,8 @@ acpi_os_write_memory(acpi_physical_addre
+ void __iomem *virt_addr;
+
+ virt_addr = ioremap(phys_addr, width);
++ if (!virt_addr)
++ return AE_NO_MEMORY;
+
+ switch (width) {
+ case 8:
+diff -urNp linux-2.6.31.7/drivers/acpi/processor_core.c linux-2.6.31.7/drivers/acpi/processor_core.c
+--- linux-2.6.31.7/drivers/acpi/processor_core.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/acpi/processor_core.c 2009-12-08 17:39:42.965667375 -0500
+@@ -712,7 +712,7 @@ static int __cpuinit acpi_processor_star
+ return 0;
+ }
+
+- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
++ BUG_ON(pr->id >= nr_cpu_ids);
+
+ /*
+ * Buggy BIOS check
+diff -urNp linux-2.6.31.7/drivers/acpi/processor_idle.c linux-2.6.31.7/drivers/acpi/processor_idle.c
+--- linux-2.6.31.7/drivers/acpi/processor_idle.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/acpi/processor_idle.c 2009-12-08 17:39:42.965667375 -0500
+@@ -108,7 +108,7 @@ static struct dmi_system_id __cpuinitdat
+ DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
+ (void *)2},
+- {},
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL},
+ };
+
+
+diff -urNp linux-2.6.31.7/drivers/acpi/sleep.c linux-2.6.31.7/drivers/acpi/sleep.c
+--- linux-2.6.31.7/drivers/acpi/sleep.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/acpi/sleep.c 2009-12-08 17:39:42.997556127 -0500
+@@ -297,7 +297,7 @@ static int acpi_suspend_state_valid(susp
+ }
+ }
+
+-static struct platform_suspend_ops acpi_suspend_ops = {
++static const struct platform_suspend_ops acpi_suspend_ops = {
+ .valid = acpi_suspend_state_valid,
+ .begin = acpi_suspend_begin,
+ .prepare_late = acpi_pm_prepare,
+@@ -325,7 +325,7 @@ static int acpi_suspend_begin_old(suspen
+ * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
+ * been requested.
+ */
+-static struct platform_suspend_ops acpi_suspend_ops_old = {
++static const struct platform_suspend_ops acpi_suspend_ops_old = {
+ .valid = acpi_suspend_state_valid,
+ .begin = acpi_suspend_begin_old,
+ .prepare_late = acpi_pm_disable_gpes,
+@@ -512,7 +512,7 @@ static void acpi_pm_enable_gpes(void)
+ acpi_enable_all_runtime_gpes();
+ }
+
+-static struct platform_hibernation_ops acpi_hibernation_ops = {
++static const struct platform_hibernation_ops acpi_hibernation_ops = {
+ .begin = acpi_hibernation_begin,
+ .end = acpi_pm_end,
+ .pre_snapshot = acpi_hibernation_pre_snapshot,
+@@ -565,7 +565,7 @@ static int acpi_hibernation_pre_snapshot
+ * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
+ * been requested.
+ */
+-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
++static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
+ .begin = acpi_hibernation_begin_old,
+ .end = acpi_pm_end,
+ .pre_snapshot = acpi_hibernation_pre_snapshot_old,
+diff -urNp linux-2.6.31.7/drivers/acpi/video.c linux-2.6.31.7/drivers/acpi/video.c
+--- linux-2.6.31.7/drivers/acpi/video.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/acpi/video.c 2009-12-08 17:39:42.998670467 -0500
+@@ -283,7 +283,7 @@ static int acpi_video_device_brightness_
+ struct file *file);
+ static ssize_t acpi_video_device_write_brightness(struct file *file,
+ const char __user *buffer, size_t count, loff_t *data);
+-static struct file_operations acpi_video_device_brightness_fops = {
++static const struct file_operations acpi_video_device_brightness_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_video_device_brightness_open_fs,
+ .read = seq_read,
+@@ -357,7 +357,7 @@ static int acpi_video_set_brightness(str
+ vd->brightness->levels[request_level]);
+ }
+
+-static struct backlight_ops acpi_backlight_ops = {
++static const struct backlight_ops acpi_backlight_ops = {
+ .get_brightness = acpi_video_get_brightness,
+ .update_status = acpi_video_set_brightness,
+ };
+diff -urNp linux-2.6.31.7/drivers/ata/ahci.c linux-2.6.31.7/drivers/ata/ahci.c
+--- linux-2.6.31.7/drivers/ata/ahci.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/ahci.c 2009-12-08 17:39:43.011675146 -0500
+@@ -351,7 +351,7 @@ static struct scsi_host_template ahci_sh
+ .sdev_attrs = ahci_sdev_attrs,
+ };
+
+-static struct ata_port_operations ahci_ops = {
++static const struct ata_port_operations ahci_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_defer = sata_pmp_qc_defer_cmd_switch,
+@@ -388,17 +388,17 @@ static struct ata_port_operations ahci_o
+ .port_stop = ahci_port_stop,
+ };
+
+-static struct ata_port_operations ahci_vt8251_ops = {
++static const struct ata_port_operations ahci_vt8251_ops = {
+ .inherits = &ahci_ops,
+ .hardreset = ahci_vt8251_hardreset,
+ };
+
+-static struct ata_port_operations ahci_p5wdh_ops = {
++static const struct ata_port_operations ahci_p5wdh_ops = {
+ .inherits = &ahci_ops,
+ .hardreset = ahci_p5wdh_hardreset,
+ };
+
+-static struct ata_port_operations ahci_sb600_ops = {
++static const struct ata_port_operations ahci_sb600_ops = {
+ .inherits = &ahci_ops,
+ .softreset = ahci_sb600_softreset,
+ .pmp_softreset = ahci_sb600_softreset,
+@@ -630,7 +630,7 @@ static const struct pci_device_id ahci_p
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
+
+- { } /* terminate list */
++ { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
+ };
+
+
+diff -urNp linux-2.6.31.7/drivers/ata/ata_generic.c linux-2.6.31.7/drivers/ata/ata_generic.c
+--- linux-2.6.31.7/drivers/ata/ata_generic.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/ata_generic.c 2009-12-08 17:39:43.012656841 -0500
+@@ -95,7 +95,7 @@ static struct scsi_host_template generic
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations generic_port_ops = {
++static const struct ata_port_operations generic_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_mode = generic_set_mode,
+diff -urNp linux-2.6.31.7/drivers/ata/ata_piix.c linux-2.6.31.7/drivers/ata/ata_piix.c
+--- linux-2.6.31.7/drivers/ata/ata_piix.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/ata_piix.c 2009-12-08 17:39:43.023650585 -0500
+@@ -291,7 +291,7 @@ static const struct pci_device_id piix_p
+ { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+- { } /* terminate list */
++ { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
+ };
+
+ static struct pci_driver piix_pci_driver = {
+@@ -309,7 +309,7 @@ static struct scsi_host_template piix_sh
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations piix_pata_ops = {
++static const struct ata_port_operations piix_pata_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = piix_set_piomode,
+@@ -317,22 +317,22 @@ static struct ata_port_operations piix_p
+ .prereset = piix_pata_prereset,
+ };
+
+-static struct ata_port_operations piix_vmw_ops = {
++static const struct ata_port_operations piix_vmw_ops = {
+ .inherits = &piix_pata_ops,
+ .bmdma_status = piix_vmw_bmdma_status,
+ };
+
+-static struct ata_port_operations ich_pata_ops = {
++static const struct ata_port_operations ich_pata_ops = {
+ .inherits = &piix_pata_ops,
+ .cable_detect = ich_pata_cable_detect,
+ .set_dmamode = ich_set_dmamode,
+ };
+
+-static struct ata_port_operations piix_sata_ops = {
++static const struct ata_port_operations piix_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ };
+
+-static struct ata_port_operations piix_sidpr_sata_ops = {
++static const struct ata_port_operations piix_sidpr_sata_ops = {
+ .inherits = &piix_sata_ops,
+ .hardreset = sata_std_hardreset,
+ .scr_read = piix_sidpr_scr_read,
+@@ -608,7 +608,7 @@ static const struct ich_laptop ich_lapto
+ { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
+ { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
+ /* end marker */
+- { 0, }
++ { 0, 0, 0 }
+ };
+
+ /**
+@@ -1086,7 +1086,7 @@ static int piix_broken_suspend(void)
+ },
+ },
+
+- { } /* terminate list */
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } /* terminate list */
+ };
+ static const char *oemstrs[] = {
+ "Tecra M3,",
+diff -urNp linux-2.6.31.7/drivers/ata/libata-acpi.c linux-2.6.31.7/drivers/ata/libata-acpi.c
+--- linux-2.6.31.7/drivers/ata/libata-acpi.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/libata-acpi.c 2009-12-08 17:39:43.023650585 -0500
+@@ -233,12 +233,12 @@ static void ata_acpi_dev_uevent(acpi_han
+ ata_acpi_uevent(dev->link->ap, dev, event);
+ }
+
+-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
++static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
+ .handler = ata_acpi_dev_notify_dock,
+ .uevent = ata_acpi_dev_uevent,
+ };
+
+-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
++static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
+ .handler = ata_acpi_ap_notify_dock,
+ .uevent = ata_acpi_ap_uevent,
+ };
+diff -urNp linux-2.6.31.7/drivers/ata/libata-core.c linux-2.6.31.7/drivers/ata/libata-core.c
+--- linux-2.6.31.7/drivers/ata/libata-core.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/libata-core.c 2009-12-08 17:39:43.036573283 -0500
+@@ -896,7 +896,7 @@ static const struct ata_xfer_ent {
+ { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
+ { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
+ { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
+- { -1, },
++ { -1, 0, 0 }
+ };
+
+ /**
+@@ -3141,7 +3141,7 @@ static const struct ata_timing ata_timin
+ { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
+ { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
+
+- { 0xFF }
++ { 0xFF, 0, 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
+@@ -4339,7 +4339,7 @@ static const struct ata_blacklist_entry
+ { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER },
+
+ /* End Marker */
+- { }
++ { NULL, NULL, 0 }
+ };
+
+ static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
+@@ -5909,7 +5909,7 @@ static void ata_host_stop(struct device
+ * LOCKING:
+ * None.
+ */
+-static void ata_finalize_port_ops(struct ata_port_operations *ops)
++static void ata_finalize_port_ops(const struct ata_port_operations *ops)
+ {
+ static DEFINE_SPINLOCK(lock);
+ const struct ata_port_operations *cur;
+@@ -5921,6 +5921,7 @@ static void ata_finalize_port_ops(struct
+ return;
+
+ spin_lock(&lock);
++ pax_open_kernel();
+
+ for (cur = ops->inherits; cur; cur = cur->inherits) {
+ void **inherit = (void **)cur;
+@@ -5934,8 +5935,9 @@ static void ata_finalize_port_ops(struct
+ if (IS_ERR(*pp))
+ *pp = NULL;
+
+- ops->inherits = NULL;
++ ((struct ata_port_operations *)ops)->inherits = NULL;
+
++ pax_close_kernel();
+ spin_unlock(&lock);
+ }
+
+@@ -6032,7 +6034,7 @@ int ata_host_start(struct ata_host *host
+ */
+ /* KILLME - the only user left is ipr */
+ void ata_host_init(struct ata_host *host, struct device *dev,
+- unsigned long flags, struct ata_port_operations *ops)
++ unsigned long flags, const struct ata_port_operations *ops)
+ {
+ spin_lock_init(&host->lock);
+ host->dev = dev;
+@@ -6695,7 +6697,7 @@ static void ata_dummy_error_handler(stru
+ /* truly dummy */
+ }
+
+-struct ata_port_operations ata_dummy_port_ops = {
++const struct ata_port_operations ata_dummy_port_ops = {
+ .qc_prep = ata_noop_qc_prep,
+ .qc_issue = ata_dummy_qc_issue,
+ .error_handler = ata_dummy_error_handler,
+diff -urNp linux-2.6.31.7/drivers/ata/libata-eh.c linux-2.6.31.7/drivers/ata/libata-eh.c
+--- linux-2.6.31.7/drivers/ata/libata-eh.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/libata-eh.c 2009-12-08 17:39:43.038539515 -0500
+@@ -3154,9 +3154,9 @@ static int ata_eh_handle_dev_fail(struct
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+-int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
+- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+- ata_postreset_fn_t postreset,
++int ata_eh_recover(struct ata_port *ap, const ata_prereset_fn_t prereset,
++ const ata_reset_fn_t softreset, const ata_reset_fn_t hardreset,
++ const ata_postreset_fn_t postreset,
+ struct ata_link **r_failed_link)
+ {
+ struct ata_link *link;
+@@ -3417,9 +3417,9 @@ void ata_eh_finish(struct ata_port *ap)
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+-void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
+- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+- ata_postreset_fn_t postreset)
++void ata_do_eh(struct ata_port *ap, const ata_prereset_fn_t prereset,
++ const ata_reset_fn_t softreset, const ata_reset_fn_t hardreset,
++ const ata_postreset_fn_t postreset)
+ {
+ struct ata_device *dev;
+ int rc;
+@@ -3448,7 +3448,7 @@ void ata_do_eh(struct ata_port *ap, ata_
+ */
+ void ata_std_error_handler(struct ata_port *ap)
+ {
+- struct ata_port_operations *ops = ap->ops;
++ const struct ata_port_operations *ops = ap->ops;
+ ata_reset_fn_t hardreset = ops->hardreset;
+
+ /* ignore built-in hardreset if SCR access is not available */
+diff -urNp linux-2.6.31.7/drivers/ata/libata.h linux-2.6.31.7/drivers/ata/libata.h
+--- linux-2.6.31.7/drivers/ata/libata.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/libata.h 2009-12-08 17:39:43.054617370 -0500
+@@ -38,7 +38,7 @@ struct ata_scsi_args {
+ void (*done)(struct scsi_cmnd *);
+ };
+
+-static inline int ata_is_builtin_hardreset(ata_reset_fn_t reset)
++static inline int ata_is_builtin_hardreset(const ata_reset_fn_t reset)
+ {
+ if (reset == sata_std_hardreset)
+ return 1;
+@@ -169,9 +169,9 @@ extern int ata_eh_reset(struct ata_link
+ ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+ ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
+ extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
+-extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
+- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+- ata_postreset_fn_t postreset,
++extern int ata_eh_recover(struct ata_port *ap, const ata_prereset_fn_t prereset,
++ const ata_reset_fn_t softreset, const ata_reset_fn_t hardreset,
++ const ata_postreset_fn_t postreset,
+ struct ata_link **r_failed_disk);
+ extern void ata_eh_finish(struct ata_port *ap);
+
+diff -urNp linux-2.6.31.7/drivers/ata/libata-pmp.c linux-2.6.31.7/drivers/ata/libata-pmp.c
+--- linux-2.6.31.7/drivers/ata/libata-pmp.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/libata-pmp.c 2009-12-08 17:39:43.046592949 -0500
+@@ -839,7 +839,7 @@ static int sata_pmp_handle_link_fail(str
+ */
+ static int sata_pmp_eh_recover(struct ata_port *ap)
+ {
+- struct ata_port_operations *ops = ap->ops;
++ const struct ata_port_operations *ops = ap->ops;
+ int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
+ struct ata_link *pmp_link = &ap->link;
+ struct ata_device *pmp_dev = pmp_link->device;
+diff -urNp linux-2.6.31.7/drivers/ata/pata_acpi.c linux-2.6.31.7/drivers/ata/pata_acpi.c
+--- linux-2.6.31.7/drivers/ata/pata_acpi.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_acpi.c 2009-12-08 17:39:43.054617370 -0500
+@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pacpi_ops = {
++static const struct ata_port_operations pacpi_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_issue = pacpi_qc_issue,
+ .cable_detect = pacpi_cable_detect,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_ali.c linux-2.6.31.7/drivers/ata/pata_ali.c
+--- linux-2.6.31.7/drivers/ata/pata_ali.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_ali.c 2009-12-08 17:39:43.060766288 -0500
+@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
+ * Port operations for PIO only ALi
+ */
+
+-static struct ata_port_operations ali_early_port_ops = {
++static const struct ata_port_operations ali_early_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = ali_set_piomode,
+@@ -382,7 +382,7 @@ static const struct ata_port_operations
+ * Port operations for DMA capable ALi without cable
+ * detect
+ */
+-static struct ata_port_operations ali_20_port_ops = {
++static const struct ata_port_operations ali_20_port_ops = {
+ .inherits = &ali_dma_base_ops,
+ .cable_detect = ata_cable_40wire,
+ .mode_filter = ali_20_filter,
+@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
+ /*
+ * Port operations for DMA capable ALi with cable detect
+ */
+-static struct ata_port_operations ali_c2_port_ops = {
++static const struct ata_port_operations ali_c2_port_ops = {
+ .inherits = &ali_dma_base_ops,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .cable_detect = ali_c2_cable_detect,
+@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
+ /*
+ * Port operations for DMA capable ALi with cable detect
+ */
+-static struct ata_port_operations ali_c4_port_ops = {
++static const struct ata_port_operations ali_c4_port_ops = {
+ .inherits = &ali_dma_base_ops,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .cable_detect = ali_c2_cable_detect,
+@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
+ /*
+ * Port operations for DMA capable ALi with cable detect and LBA48
+ */
+-static struct ata_port_operations ali_c5_port_ops = {
++static const struct ata_port_operations ali_c5_port_ops = {
+ .inherits = &ali_dma_base_ops,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .dev_config = ali_warn_atapi_dma,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_amd.c linux-2.6.31.7/drivers/ata/pata_amd.c
+--- linux-2.6.31.7/drivers/ata/pata_amd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_amd.c 2009-12-08 17:39:43.060766288 -0500
+@@ -397,28 +397,28 @@ static const struct ata_port_operations
+ .prereset = amd_pre_reset,
+ };
+
+-static struct ata_port_operations amd33_port_ops = {
++static const struct ata_port_operations amd33_port_ops = {
+ .inherits = &amd_base_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = amd33_set_piomode,
+ .set_dmamode = amd33_set_dmamode,
+ };
+
+-static struct ata_port_operations amd66_port_ops = {
++static const struct ata_port_operations amd66_port_ops = {
+ .inherits = &amd_base_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = amd66_set_piomode,
+ .set_dmamode = amd66_set_dmamode,
+ };
+
+-static struct ata_port_operations amd100_port_ops = {
++static const struct ata_port_operations amd100_port_ops = {
+ .inherits = &amd_base_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = amd100_set_piomode,
+ .set_dmamode = amd100_set_dmamode,
+ };
+
+-static struct ata_port_operations amd133_port_ops = {
++static const struct ata_port_operations amd133_port_ops = {
+ .inherits = &amd_base_port_ops,
+ .cable_detect = amd_cable_detect,
+ .set_piomode = amd133_set_piomode,
+@@ -433,13 +433,13 @@ static const struct ata_port_operations
+ .host_stop = nv_host_stop,
+ };
+
+-static struct ata_port_operations nv100_port_ops = {
++static const struct ata_port_operations nv100_port_ops = {
+ .inherits = &nv_base_port_ops,
+ .set_piomode = nv100_set_piomode,
+ .set_dmamode = nv100_set_dmamode,
+ };
+
+-static struct ata_port_operations nv133_port_ops = {
++static const struct ata_port_operations nv133_port_ops = {
+ .inherits = &nv_base_port_ops,
+ .set_piomode = nv133_set_piomode,
+ .set_dmamode = nv133_set_dmamode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_artop.c linux-2.6.31.7/drivers/ata/pata_artop.c
+--- linux-2.6.31.7/drivers/ata/pata_artop.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_artop.c 2009-12-08 17:39:43.061677371 -0500
+@@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations artop6210_ops = {
++static const struct ata_port_operations artop6210_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = artop6210_set_piomode,
+@@ -320,7 +320,7 @@ static struct ata_port_operations artop6
+ .qc_defer = artop6210_qc_defer,
+ };
+
+-static struct ata_port_operations artop6260_ops = {
++static const struct ata_port_operations artop6260_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = artop6260_cable_detect,
+ .set_piomode = artop6260_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_at32.c linux-2.6.31.7/drivers/ata/pata_at32.c
+--- linux-2.6.31.7/drivers/ata/pata_at32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_at32.c 2009-12-08 17:39:43.061677371 -0500
+@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations at32_port_ops = {
++static const struct ata_port_operations at32_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = pata_at32_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_at91.c linux-2.6.31.7/drivers/ata/pata_at91.c
+--- linux-2.6.31.7/drivers/ata/pata_at91.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_at91.c 2009-12-08 17:39:43.061677371 -0500
+@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pata_at91_port_ops = {
++static const struct ata_port_operations pata_at91_port_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .sff_data_xfer = pata_at91_data_xfer_noirq,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_atiixp.c linux-2.6.31.7/drivers/ata/pata_atiixp.c
+--- linux-2.6.31.7/drivers/ata/pata_atiixp.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_atiixp.c 2009-12-08 17:39:43.062682228 -0500
+@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ };
+
+-static struct ata_port_operations atiixp_port_ops = {
++static const struct ata_port_operations atiixp_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .qc_prep = ata_sff_dumb_qc_prep,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_bf54x.c linux-2.6.31.7/drivers/ata/pata_bf54x.c
+--- linux-2.6.31.7/drivers/ata/pata_bf54x.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_bf54x.c 2009-12-08 17:39:43.062682228 -0500
+@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations bfin_pata_ops = {
++static const struct ata_port_operations bfin_pata_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .set_piomode = bfin_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_cmd640.c linux-2.6.31.7/drivers/ata/pata_cmd640.c
+--- linux-2.6.31.7/drivers/ata/pata_cmd640.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_cmd640.c 2009-12-08 17:39:43.063682626 -0500
+@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations cmd640_port_ops = {
++static const struct ata_port_operations cmd640_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ /* In theory xfer_noirq is not needed once we kill the prefetcher */
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_cmd64x.c linux-2.6.31.7/drivers/ata/pata_cmd64x.c
+--- linux-2.6.31.7/drivers/ata/pata_cmd64x.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_cmd64x.c 2009-12-08 17:39:43.063682626 -0500
+@@ -275,18 +275,18 @@ static const struct ata_port_operations
+ .set_dmamode = cmd64x_set_dmamode,
+ };
+
+-static struct ata_port_operations cmd64x_port_ops = {
++static const struct ata_port_operations cmd64x_port_ops = {
+ .inherits = &cmd64x_base_ops,
+ .cable_detect = ata_cable_40wire,
+ };
+
+-static struct ata_port_operations cmd646r1_port_ops = {
++static const struct ata_port_operations cmd646r1_port_ops = {
+ .inherits = &cmd64x_base_ops,
+ .bmdma_stop = cmd646r1_bmdma_stop,
+ .cable_detect = ata_cable_40wire,
+ };
+
+-static struct ata_port_operations cmd648_port_ops = {
++static const struct ata_port_operations cmd648_port_ops = {
+ .inherits = &cmd64x_base_ops,
+ .bmdma_stop = cmd648_bmdma_stop,
+ .cable_detect = cmd648_cable_detect,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_cs5520.c linux-2.6.31.7/drivers/ata/pata_cs5520.c
+--- linux-2.6.31.7/drivers/ata/pata_cs5520.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_cs5520.c 2009-12-08 17:39:43.068682824 -0500
+@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ };
+
+-static struct ata_port_operations cs5520_port_ops = {
++static const struct ata_port_operations cs5520_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_prep = ata_sff_dumb_qc_prep,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_cs5530.c linux-2.6.31.7/drivers/ata/pata_cs5530.c
+--- linux-2.6.31.7/drivers/ata/pata_cs5530.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_cs5530.c 2009-12-08 17:39:43.073628289 -0500
+@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ };
+
+-static struct ata_port_operations cs5530_port_ops = {
++static const struct ata_port_operations cs5530_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .qc_prep = ata_sff_dumb_qc_prep,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_cs5535.c linux-2.6.31.7/drivers/ata/pata_cs5535.c
+--- linux-2.6.31.7/drivers/ata/pata_cs5535.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_cs5535.c 2009-12-08 17:39:43.074691443 -0500
+@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations cs5535_port_ops = {
++static const struct ata_port_operations cs5535_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = cs5535_cable_detect,
+ .set_piomode = cs5535_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_cs5536.c linux-2.6.31.7/drivers/ata/pata_cs5536.c
+--- linux-2.6.31.7/drivers/ata/pata_cs5536.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_cs5536.c 2009-12-08 17:39:43.074691443 -0500
+@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations cs5536_port_ops = {
++static const struct ata_port_operations cs5536_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = cs5536_cable_detect,
+ .set_piomode = cs5536_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_cypress.c linux-2.6.31.7/drivers/ata/pata_cypress.c
+--- linux-2.6.31.7/drivers/ata/pata_cypress.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_cypress.c 2009-12-08 17:39:43.075682885 -0500
+@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations cy82c693_port_ops = {
++static const struct ata_port_operations cy82c693_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = cy82c693_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_efar.c linux-2.6.31.7/drivers/ata/pata_efar.c
+--- linux-2.6.31.7/drivers/ata/pata_efar.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_efar.c 2009-12-08 17:39:43.075682885 -0500
+@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations efar_ops = {
++static const struct ata_port_operations efar_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = efar_cable_detect,
+ .set_piomode = efar_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_hpt366.c linux-2.6.31.7/drivers/ata/pata_hpt366.c
+--- linux-2.6.31.7/drivers/ata/pata_hpt366.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_hpt366.c 2009-12-08 17:39:43.075682885 -0500
+@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
+ * Configuration for HPT366/68
+ */
+
+-static struct ata_port_operations hpt366_port_ops = {
++static const struct ata_port_operations hpt366_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = hpt36x_cable_detect,
+ .mode_filter = hpt366_filter,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_hpt37x.c linux-2.6.31.7/drivers/ata/pata_hpt37x.c
+--- linux-2.6.31.7/drivers/ata/pata_hpt37x.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_hpt37x.c 2009-12-08 17:39:43.079681836 -0500
+@@ -578,7 +578,7 @@ static struct scsi_host_template hpt37x_
+ * Configuration for HPT370
+ */
+
+-static struct ata_port_operations hpt370_port_ops = {
++static const struct ata_port_operations hpt370_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .bmdma_stop = hpt370_bmdma_stop,
+@@ -593,7 +593,7 @@ static struct ata_port_operations hpt370
+ * Configuration for HPT370A. Close to 370 but less filters
+ */
+
+-static struct ata_port_operations hpt370a_port_ops = {
++static const struct ata_port_operations hpt370a_port_ops = {
+ .inherits = &hpt370_port_ops,
+ .mode_filter = hpt370a_filter,
+ };
+@@ -603,7 +603,7 @@ static struct ata_port_operations hpt370
+ * and DMA mode setting functionality.
+ */
+
+-static struct ata_port_operations hpt372_port_ops = {
++static const struct ata_port_operations hpt372_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .bmdma_stop = hpt37x_bmdma_stop,
+@@ -618,7 +618,7 @@ static struct ata_port_operations hpt372
+ * but we have a different cable detection procedure for function 1.
+ */
+
+-static struct ata_port_operations hpt374_fn1_port_ops = {
++static const struct ata_port_operations hpt374_fn1_port_ops = {
+ .inherits = &hpt372_port_ops,
+ .prereset = hpt374_fn1_pre_reset,
+ };
+diff -urNp linux-2.6.31.7/drivers/ata/pata_hpt3x2n.c linux-2.6.31.7/drivers/ata/pata_hpt3x2n.c
+--- linux-2.6.31.7/drivers/ata/pata_hpt3x2n.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_hpt3x2n.c 2009-12-08 17:39:43.081678328 -0500
+@@ -334,7 +334,7 @@ static struct scsi_host_template hpt3x2n
+ * Configuration for HPT3x2n.
+ */
+
+-static struct ata_port_operations hpt3x2n_port_ops = {
++static const struct ata_port_operations hpt3x2n_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .bmdma_stop = hpt3x2n_bmdma_stop,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_hpt3x3.c linux-2.6.31.7/drivers/ata/pata_hpt3x3.c
+--- linux-2.6.31.7/drivers/ata/pata_hpt3x3.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_hpt3x3.c 2009-12-08 17:39:43.081678328 -0500
+@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations hpt3x3_port_ops = {
++static const struct ata_port_operations hpt3x3_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = hpt3x3_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_icside.c linux-2.6.31.7/drivers/ata/pata_icside.c
+--- linux-2.6.31.7/drivers/ata/pata_icside.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_icside.c 2009-12-08 17:39:43.081678328 -0500
+@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
+ }
+ }
+
+-static struct ata_port_operations pata_icside_port_ops = {
++static const struct ata_port_operations pata_icside_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ /* no need to build any PRD tables for DMA */
+ .qc_prep = ata_noop_qc_prep,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_isapnp.c linux-2.6.31.7/drivers/ata/pata_isapnp.c
+--- linux-2.6.31.7/drivers/ata/pata_isapnp.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_isapnp.c 2009-12-08 17:39:43.082678901 -0500
+@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations isapnp_port_ops = {
++static const struct ata_port_operations isapnp_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ };
+
+-static struct ata_port_operations isapnp_noalt_port_ops = {
++static const struct ata_port_operations isapnp_noalt_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ /* No altstatus so we don't want to use the lost interrupt poll */
+diff -urNp linux-2.6.31.7/drivers/ata/pata_it8213.c linux-2.6.31.7/drivers/ata/pata_it8213.c
+--- linux-2.6.31.7/drivers/ata/pata_it8213.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_it8213.c 2009-12-08 17:39:43.082678901 -0500
+@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
+ };
+
+
+-static struct ata_port_operations it8213_ops = {
++static const struct ata_port_operations it8213_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = it8213_cable_detect,
+ .set_piomode = it8213_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_it821x.c linux-2.6.31.7/drivers/ata/pata_it821x.c
+--- linux-2.6.31.7/drivers/ata/pata_it821x.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_it821x.c 2009-12-08 17:39:43.083690294 -0500
+@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations it821x_smart_port_ops = {
++static const struct ata_port_operations it821x_smart_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .check_atapi_dma= it821x_check_atapi_dma,
+@@ -814,7 +814,7 @@ static struct ata_port_operations it821x
+ .port_start = it821x_port_start,
+ };
+
+-static struct ata_port_operations it821x_passthru_port_ops = {
++static const struct ata_port_operations it821x_passthru_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .check_atapi_dma= it821x_check_atapi_dma,
+@@ -830,7 +830,7 @@ static struct ata_port_operations it821x
+ .port_start = it821x_port_start,
+ };
+
+-static struct ata_port_operations it821x_rdc_port_ops = {
++static const struct ata_port_operations it821x_rdc_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .check_atapi_dma= it821x_check_atapi_dma,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_ixp4xx_cf.c linux-2.6.31.7/drivers/ata/pata_ixp4xx_cf.c
+--- linux-2.6.31.7/drivers/ata/pata_ixp4xx_cf.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_ixp4xx_cf.c 2009-12-08 17:39:43.083690294 -0500
+@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations ixp4xx_port_ops = {
++static const struct ata_port_operations ixp4xx_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ixp4xx_mmio_data_xfer,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_jmicron.c linux-2.6.31.7/drivers/ata/pata_jmicron.c
+--- linux-2.6.31.7/drivers/ata/pata_jmicron.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_jmicron.c 2009-12-08 17:39:43.084676920 -0500
+@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations jmicron_ops = {
++static const struct ata_port_operations jmicron_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .prereset = jmicron_pre_reset,
+ };
+diff -urNp linux-2.6.31.7/drivers/ata/pata_legacy.c linux-2.6.31.7/drivers/ata/pata_legacy.c
+--- linux-2.6.31.7/drivers/ata/pata_legacy.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_legacy.c 2009-12-08 17:39:43.094582414 -0500
+@@ -106,7 +106,7 @@ struct legacy_probe {
+
+ struct legacy_controller {
+ const char *name;
+- struct ata_port_operations *ops;
++ const struct ata_port_operations *ops;
+ unsigned int pio_mask;
+ unsigned int flags;
+ unsigned int pflags;
+@@ -223,12 +223,12 @@ static const struct ata_port_operations
+ * pio_mask as well.
+ */
+
+-static struct ata_port_operations simple_port_ops = {
++static const struct ata_port_operations simple_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ };
+
+-static struct ata_port_operations legacy_port_ops = {
++static const struct ata_port_operations legacy_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .set_mode = legacy_set_mode,
+@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
+ return buflen;
+ }
+
+-static struct ata_port_operations pdc20230_port_ops = {
++static const struct ata_port_operations pdc20230_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = pdc20230_set_piomode,
+ .sff_data_xfer = pdc_data_xfer_vlb,
+@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
+ ioread8(ap->ioaddr.status_addr);
+ }
+
+-static struct ata_port_operations ht6560a_port_ops = {
++static const struct ata_port_operations ht6560a_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = ht6560a_set_piomode,
+ };
+@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
+ ioread8(ap->ioaddr.status_addr);
+ }
+
+-static struct ata_port_operations ht6560b_port_ops = {
++static const struct ata_port_operations ht6560b_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = ht6560b_set_piomode,
+ };
+@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
+ }
+
+
+-static struct ata_port_operations opti82c611a_port_ops = {
++static const struct ata_port_operations opti82c611a_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = opti82c611a_set_piomode,
+ };
+@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
+ return ata_sff_qc_issue(qc);
+ }
+
+-static struct ata_port_operations opti82c46x_port_ops = {
++static const struct ata_port_operations opti82c46x_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = opti82c46x_set_piomode,
+ .qc_issue = opti82c46x_qc_issue,
+@@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
+ return 0;
+ }
+
+-static struct ata_port_operations qdi6500_port_ops = {
++static const struct ata_port_operations qdi6500_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = qdi6500_set_piomode,
+ .qc_issue = qdi_qc_issue,
+ .sff_data_xfer = vlb32_data_xfer,
+ };
+
+-static struct ata_port_operations qdi6580_port_ops = {
++static const struct ata_port_operations qdi6580_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = qdi6580_set_piomode,
+ .sff_data_xfer = vlb32_data_xfer,
+ };
+
+-static struct ata_port_operations qdi6580dp_port_ops = {
++static const struct ata_port_operations qdi6580dp_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = qdi6580dp_set_piomode,
+ .sff_data_xfer = vlb32_data_xfer,
+@@ -855,7 +855,7 @@ static int winbond_port(struct platform_
+ return 0;
+ }
+
+-static struct ata_port_operations winbond_port_ops = {
++static const struct ata_port_operations winbond_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = winbond_set_piomode,
+ .sff_data_xfer = vlb32_data_xfer,
+@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
+ int pio_modes = controller->pio_mask;
+ unsigned long io = probe->port;
+ u32 mask = (1 << probe->slot);
+- struct ata_port_operations *ops = controller->ops;
++ const struct ata_port_operations *ops = controller->ops;
+ struct legacy_data *ld = &legacy_data[probe->slot];
+ struct ata_host *host = NULL;
+ struct ata_port *ap;
+diff -urNp linux-2.6.31.7/drivers/ata/pata_marvell.c linux-2.6.31.7/drivers/ata/pata_marvell.c
+--- linux-2.6.31.7/drivers/ata/pata_marvell.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_marvell.c 2009-12-08 17:39:43.094582414 -0500
+@@ -100,7 +100,7 @@ static struct scsi_host_template marvell
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations marvell_ops = {
++static const struct ata_port_operations marvell_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = marvell_cable_detect,
+ .prereset = marvell_pre_reset,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_mpc52xx.c linux-2.6.31.7/drivers/ata/pata_mpc52xx.c
+--- linux-2.6.31.7/drivers/ata/pata_mpc52xx.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_mpc52xx.c 2009-12-08 17:39:43.094582414 -0500
+@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations mpc52xx_ata_port_ops = {
++static const struct ata_port_operations mpc52xx_ata_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_dev_select = mpc52xx_ata_dev_select,
+ .set_piomode = mpc52xx_ata_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_mpiix.c linux-2.6.31.7/drivers/ata/pata_mpiix.c
+--- linux-2.6.31.7/drivers/ata/pata_mpiix.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_mpiix.c 2009-12-08 17:39:43.094582414 -0500
+@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations mpiix_port_ops = {
++static const struct ata_port_operations mpiix_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .qc_issue = mpiix_qc_issue,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_netcell.c linux-2.6.31.7/drivers/ata/pata_netcell.c
+--- linux-2.6.31.7/drivers/ata/pata_netcell.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_netcell.c 2009-12-08 17:39:43.095686026 -0500
+@@ -34,7 +34,7 @@ static struct scsi_host_template netcell
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations netcell_ops = {
++static const struct ata_port_operations netcell_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_80wire,
+ .read_id = netcell_read_id,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_ninja32.c linux-2.6.31.7/drivers/ata/pata_ninja32.c
+--- linux-2.6.31.7/drivers/ata/pata_ninja32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_ninja32.c 2009-12-08 17:39:43.095686026 -0500
+@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations ninja32_port_ops = {
++static const struct ata_port_operations ninja32_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .sff_dev_select = ninja32_dev_select,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_ns87410.c linux-2.6.31.7/drivers/ata/pata_ns87410.c
+--- linux-2.6.31.7/drivers/ata/pata_ns87410.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_ns87410.c 2009-12-08 17:39:43.095686026 -0500
+@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations ns87410_port_ops = {
++static const struct ata_port_operations ns87410_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .qc_issue = ns87410_qc_issue,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_ns87415.c linux-2.6.31.7/drivers/ata/pata_ns87415.c
+--- linux-2.6.31.7/drivers/ata/pata_ns87415.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_ns87415.c 2009-12-08 17:39:43.096529000 -0500
+@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
+ }
+ #endif /* 87560 SuperIO Support */
+
+-static struct ata_port_operations ns87415_pata_ops = {
++static const struct ata_port_operations ns87415_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .check_atapi_dma = ns87415_check_atapi_dma,
+@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
+ };
+
+ #if defined(CONFIG_SUPERIO)
+-static struct ata_port_operations ns87560_pata_ops = {
++static const struct ata_port_operations ns87560_pata_ops = {
+ .inherits = &ns87415_pata_ops,
+ .sff_tf_read = ns87560_tf_read,
+ .sff_check_status = ns87560_check_status,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_oldpiix.c linux-2.6.31.7/drivers/ata/pata_oldpiix.c
+--- linux-2.6.31.7/drivers/ata/pata_oldpiix.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_oldpiix.c 2009-12-08 17:39:43.096529000 -0500
+@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations oldpiix_pata_ops = {
++static const struct ata_port_operations oldpiix_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_issue = oldpiix_qc_issue,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_opti.c linux-2.6.31.7/drivers/ata/pata_opti.c
+--- linux-2.6.31.7/drivers/ata/pata_opti.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_opti.c 2009-12-08 17:39:43.096529000 -0500
+@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations opti_port_ops = {
++static const struct ata_port_operations opti_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = opti_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_optidma.c linux-2.6.31.7/drivers/ata/pata_optidma.c
+--- linux-2.6.31.7/drivers/ata/pata_optidma.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_optidma.c 2009-12-08 17:39:43.096529000 -0500
+@@ -337,7 +337,7 @@ static struct scsi_host_template optidma
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations optidma_port_ops = {
++static const struct ata_port_operations optidma_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = optidma_set_pio_mode,
+@@ -346,7 +346,7 @@ static struct ata_port_operations optidm
+ .prereset = optidma_pre_reset,
+ };
+
+-static struct ata_port_operations optiplus_port_ops = {
++static const struct ata_port_operations optiplus_port_ops = {
+ .inherits = &optidma_port_ops,
+ .set_piomode = optiplus_set_pio_mode,
+ .set_dmamode = optiplus_set_dma_mode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_palmld.c linux-2.6.31.7/drivers/ata/pata_palmld.c
+--- linux-2.6.31.7/drivers/ata/pata_palmld.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_palmld.c 2009-12-08 17:39:43.097678879 -0500
+@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations palmld_port_ops = {
++static const struct ata_port_operations palmld_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_pcmcia.c linux-2.6.31.7/drivers/ata/pata_pcmcia.c
+--- linux-2.6.31.7/drivers/ata/pata_pcmcia.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_pcmcia.c 2009-12-08 17:39:43.097678879 -0500
+@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pcmcia_port_ops = {
++static const struct ata_port_operations pcmcia_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .cable_detect = ata_cable_40wire,
+ .set_mode = pcmcia_set_mode,
+ };
+
+-static struct ata_port_operations pcmcia_8bit_port_ops = {
++static const struct ata_port_operations pcmcia_8bit_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_data_xfer_8bit,
+ .cable_detect = ata_cable_40wire,
+@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
+ unsigned long io_base, ctl_base;
+ void __iomem *io_addr, *ctl_addr;
+ int n_ports = 1;
+- struct ata_port_operations *ops = &pcmcia_port_ops;
++ const struct ata_port_operations *ops = &pcmcia_port_ops;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+diff -urNp linux-2.6.31.7/drivers/ata/pata_pdc2027x.c linux-2.6.31.7/drivers/ata/pata_pdc2027x.c
+--- linux-2.6.31.7/drivers/ata/pata_pdc2027x.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_pdc2027x.c 2009-12-08 17:39:43.103530278 -0500
+@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pdc2027x_pata100_ops = {
++static const struct ata_port_operations pdc2027x_pata100_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .check_atapi_dma = pdc2027x_check_atapi_dma,
+ .cable_detect = pdc2027x_cable_detect,
+ .prereset = pdc2027x_prereset,
+ };
+
+-static struct ata_port_operations pdc2027x_pata133_ops = {
++static const struct ata_port_operations pdc2027x_pata133_ops = {
+ .inherits = &pdc2027x_pata100_ops,
+ .mode_filter = pdc2027x_mode_filter,
+ .set_piomode = pdc2027x_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_pdc202xx_old.c linux-2.6.31.7/drivers/ata/pata_pdc202xx_old.c
+--- linux-2.6.31.7/drivers/ata/pata_pdc202xx_old.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_pdc202xx_old.c 2009-12-08 17:39:43.103530278 -0500
+@@ -265,7 +265,7 @@ static struct scsi_host_template pdc202x
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pdc2024x_port_ops = {
++static const struct ata_port_operations pdc2024x_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .cable_detect = ata_cable_40wire,
+@@ -273,7 +273,7 @@ static struct ata_port_operations pdc202
+ .set_dmamode = pdc202xx_set_dmamode,
+ };
+
+-static struct ata_port_operations pdc2026x_port_ops = {
++static const struct ata_port_operations pdc2026x_port_ops = {
+ .inherits = &pdc2024x_port_ops,
+
+ .check_atapi_dma = pdc2026x_check_atapi_dma,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_platform.c linux-2.6.31.7/drivers/ata/pata_platform.c
+--- linux-2.6.31.7/drivers/ata/pata_platform.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_platform.c 2009-12-08 17:39:43.104683841 -0500
+@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations pata_platform_port_ops = {
++static const struct ata_port_operations pata_platform_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .cable_detect = ata_cable_unknown,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_qdi.c linux-2.6.31.7/drivers/ata/pata_qdi.c
+--- linux-2.6.31.7/drivers/ata/pata_qdi.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_qdi.c 2009-12-08 17:39:43.104683841 -0500
+@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations qdi6500_port_ops = {
++static const struct ata_port_operations qdi6500_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .qc_issue = qdi_qc_issue,
+ .sff_data_xfer = qdi_data_xfer,
+@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
+ .set_piomode = qdi6500_set_piomode,
+ };
+
+-static struct ata_port_operations qdi6580_port_ops = {
++static const struct ata_port_operations qdi6580_port_ops = {
+ .inherits = &qdi6500_port_ops,
+ .set_piomode = qdi6580_set_piomode,
+ };
+diff -urNp linux-2.6.31.7/drivers/ata/pata_radisys.c linux-2.6.31.7/drivers/ata/pata_radisys.c
+--- linux-2.6.31.7/drivers/ata/pata_radisys.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_radisys.c 2009-12-08 17:39:43.105720630 -0500
+@@ -187,7 +187,7 @@ static struct scsi_host_template radisys
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations radisys_pata_ops = {
++static const struct ata_port_operations radisys_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_issue = radisys_qc_issue,
+ .cable_detect = ata_cable_unknown,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_rb532_cf.c linux-2.6.31.7/drivers/ata/pata_rb532_cf.c
+--- linux-2.6.31.7/drivers/ata/pata_rb532_cf.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_rb532_cf.c 2009-12-08 17:39:43.105720630 -0500
+@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
+ return IRQ_HANDLED;
+ }
+
+-static struct ata_port_operations rb532_pata_port_ops = {
++static const struct ata_port_operations rb532_pata_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer32,
+ };
+diff -urNp linux-2.6.31.7/drivers/ata/pata_rz1000.c linux-2.6.31.7/drivers/ata/pata_rz1000.c
+--- linux-2.6.31.7/drivers/ata/pata_rz1000.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_rz1000.c 2009-12-08 17:39:43.105720630 -0500
+@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations rz1000_port_ops = {
++static const struct ata_port_operations rz1000_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_mode = rz1000_set_mode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_sc1200.c linux-2.6.31.7/drivers/ata/pata_sc1200.c
+--- linux-2.6.31.7/drivers/ata/pata_sc1200.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_sc1200.c 2009-12-08 17:39:43.106685055 -0500
+@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ };
+
+-static struct ata_port_operations sc1200_port_ops = {
++static const struct ata_port_operations sc1200_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_prep = ata_sff_dumb_qc_prep,
+ .qc_issue = sc1200_qc_issue,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_scc.c linux-2.6.31.7/drivers/ata/pata_scc.c
+--- linux-2.6.31.7/drivers/ata/pata_scc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_scc.c 2009-12-08 17:39:43.106685055 -0500
+@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations scc_pata_ops = {
++static const struct ata_port_operations scc_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .set_piomode = scc_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_sch.c linux-2.6.31.7/drivers/ata/pata_sch.c
+--- linux-2.6.31.7/drivers/ata/pata_sch.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_sch.c 2009-12-08 17:39:43.107686492 -0500
+@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations sch_pata_ops = {
++static const struct ata_port_operations sch_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = sch_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_serverworks.c linux-2.6.31.7/drivers/ata/pata_serverworks.c
+--- linux-2.6.31.7/drivers/ata/pata_serverworks.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_serverworks.c 2009-12-08 17:39:43.107686492 -0500
+@@ -299,7 +299,7 @@ static struct scsi_host_template serverw
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations serverworks_osb4_port_ops = {
++static const struct ata_port_operations serverworks_osb4_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = serverworks_cable_detect,
+ .mode_filter = serverworks_osb4_filter,
+@@ -307,7 +307,7 @@ static struct ata_port_operations server
+ .set_dmamode = serverworks_set_dmamode,
+ };
+
+-static struct ata_port_operations serverworks_csb_port_ops = {
++static const struct ata_port_operations serverworks_csb_port_ops = {
+ .inherits = &serverworks_osb4_port_ops,
+ .mode_filter = serverworks_csb_filter,
+ };
+diff -urNp linux-2.6.31.7/drivers/ata/pata_sil680.c linux-2.6.31.7/drivers/ata/pata_sil680.c
+--- linux-2.6.31.7/drivers/ata/pata_sil680.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_sil680.c 2009-12-08 17:39:43.107686492 -0500
+@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations sil680_port_ops = {
++static const struct ata_port_operations sil680_port_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .cable_detect = sil680_cable_detect,
+ .set_piomode = sil680_set_piomode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_sis.c linux-2.6.31.7/drivers/ata/pata_sis.c
+--- linux-2.6.31.7/drivers/ata/pata_sis.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_sis.c 2009-12-08 17:39:43.108684633 -0500
+@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations sis_133_for_sata_ops = {
++static const struct ata_port_operations sis_133_for_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .set_piomode = sis_133_set_piomode,
+ .set_dmamode = sis_133_set_dmamode,
+ .cable_detect = sis_133_cable_detect,
+ };
+
+-static struct ata_port_operations sis_base_ops = {
++static const struct ata_port_operations sis_base_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .prereset = sis_pre_reset,
+ };
+
+-static struct ata_port_operations sis_133_ops = {
++static const struct ata_port_operations sis_133_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_133_set_piomode,
+ .set_dmamode = sis_133_set_dmamode,
+ .cable_detect = sis_133_cable_detect,
+ };
+
+-static struct ata_port_operations sis_133_early_ops = {
++static const struct ata_port_operations sis_133_early_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_100_set_piomode,
+ .set_dmamode = sis_133_early_set_dmamode,
+ .cable_detect = sis_66_cable_detect,
+ };
+
+-static struct ata_port_operations sis_100_ops = {
++static const struct ata_port_operations sis_100_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_100_set_piomode,
+ .set_dmamode = sis_100_set_dmamode,
+ .cable_detect = sis_66_cable_detect,
+ };
+
+-static struct ata_port_operations sis_66_ops = {
++static const struct ata_port_operations sis_66_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_old_set_piomode,
+ .set_dmamode = sis_66_set_dmamode,
+ .cable_detect = sis_66_cable_detect,
+ };
+
+-static struct ata_port_operations sis_old_ops = {
++static const struct ata_port_operations sis_old_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_old_set_piomode,
+ .set_dmamode = sis_old_set_dmamode,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_sl82c105.c linux-2.6.31.7/drivers/ata/pata_sl82c105.c
+--- linux-2.6.31.7/drivers/ata/pata_sl82c105.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_sl82c105.c 2009-12-08 17:39:43.108684633 -0500
+@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations sl82c105_port_ops = {
++static const struct ata_port_operations sl82c105_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_defer = sl82c105_qc_defer,
+ .bmdma_start = sl82c105_bmdma_start,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_triflex.c linux-2.6.31.7/drivers/ata/pata_triflex.c
+--- linux-2.6.31.7/drivers/ata/pata_triflex.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_triflex.c 2009-12-08 17:39:43.108684633 -0500
+@@ -178,7 +178,7 @@ static struct scsi_host_template triflex
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations triflex_port_ops = {
++static const struct ata_port_operations triflex_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .bmdma_start = triflex_bmdma_start,
+ .bmdma_stop = triflex_bmdma_stop,
+diff -urNp linux-2.6.31.7/drivers/ata/pata_via.c linux-2.6.31.7/drivers/ata/pata_via.c
+--- linux-2.6.31.7/drivers/ata/pata_via.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_via.c 2009-12-08 17:39:43.110680637 -0500
+@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations via_port_ops = {
++static const struct ata_port_operations via_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = via_cable_detect,
+ .set_piomode = via_set_piomode,
+@@ -429,7 +429,7 @@ static struct ata_port_operations via_po
+ .port_start = via_port_start,
+ };
+
+-static struct ata_port_operations via_port_ops_noirq = {
++static const struct ata_port_operations via_port_ops_noirq = {
+ .inherits = &via_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ };
+diff -urNp linux-2.6.31.7/drivers/ata/pata_winbond.c linux-2.6.31.7/drivers/ata/pata_winbond.c
+--- linux-2.6.31.7/drivers/ata/pata_winbond.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pata_winbond.c 2009-12-08 17:39:43.111686756 -0500
+@@ -125,7 +125,7 @@ static struct scsi_host_template winbond
+ ATA_PIO_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations winbond_port_ops = {
++static const struct ata_port_operations winbond_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = winbond_data_xfer,
+ .cable_detect = ata_cable_40wire,
+diff -urNp linux-2.6.31.7/drivers/ata/pdc_adma.c linux-2.6.31.7/drivers/ata/pdc_adma.c
+--- linux-2.6.31.7/drivers/ata/pdc_adma.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/pdc_adma.c 2009-12-08 17:39:43.111686756 -0500
+@@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
+ .dma_boundary = ADMA_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations adma_ata_ops = {
++static const struct ata_port_operations adma_ata_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .lost_interrupt = ATA_OP_NULL,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_fsl.c linux-2.6.31.7/drivers/ata/sata_fsl.c
+--- linux-2.6.31.7/drivers/ata/sata_fsl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_fsl.c 2009-12-08 17:39:43.112653653 -0500
+@@ -1254,7 +1254,7 @@ static struct scsi_host_template sata_fs
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations sata_fsl_ops = {
++static const struct ata_port_operations sata_fsl_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_prep = sata_fsl_qc_prep,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_inic162x.c linux-2.6.31.7/drivers/ata/sata_inic162x.c
+--- linux-2.6.31.7/drivers/ata/sata_inic162x.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_inic162x.c 2009-12-08 17:39:43.112653653 -0500
+@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
+ return 0;
+ }
+
+-static struct ata_port_operations inic_port_ops = {
++static const struct ata_port_operations inic_port_ops = {
+ .inherits = &sata_port_ops,
+
+ .check_atapi_dma = inic_check_atapi_dma,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_mv.c linux-2.6.31.7/drivers/ata/sata_mv.c
+--- linux-2.6.31.7/drivers/ata/sata_mv.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_mv.c 2009-12-08 17:39:43.122662501 -0500
+@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
+ .dma_boundary = MV_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations mv5_ops = {
++static const struct ata_port_operations mv5_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .lost_interrupt = ATA_OP_NULL,
+@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
+ .port_stop = mv_port_stop,
+ };
+
+-static struct ata_port_operations mv6_ops = {
++static const struct ata_port_operations mv6_ops = {
+ .inherits = &mv5_ops,
+ .dev_config = mv6_dev_config,
+ .scr_read = mv_scr_read,
+@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
+ .bmdma_status = mv_bmdma_status,
+ };
+
+-static struct ata_port_operations mv_iie_ops = {
++static const struct ata_port_operations mv_iie_ops = {
+ .inherits = &mv6_ops,
+ .dev_config = ATA_OP_NULL,
+ .qc_prep = mv_qc_prep_iie,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_nv.c linux-2.6.31.7/drivers/ata/sata_nv.c
+--- linux-2.6.31.7/drivers/ata/sata_nv.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_nv.c 2009-12-08 17:39:43.147931500 -0500
+@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
+ * cases. Define nv_hardreset() which only kicks in for post-boot
+ * probing and use it for all variants.
+ */
+-static struct ata_port_operations nv_generic_ops = {
++static const struct ata_port_operations nv_generic_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .lost_interrupt = ATA_OP_NULL,
+ .scr_read = nv_scr_read,
+@@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
+ .hardreset = nv_hardreset,
+ };
+
+-static struct ata_port_operations nv_nf2_ops = {
++static const struct ata_port_operations nv_nf2_ops = {
+ .inherits = &nv_generic_ops,
+ .freeze = nv_nf2_freeze,
+ .thaw = nv_nf2_thaw,
+ };
+
+-static struct ata_port_operations nv_ck804_ops = {
++static const struct ata_port_operations nv_ck804_ops = {
+ .inherits = &nv_generic_ops,
+ .freeze = nv_ck804_freeze,
+ .thaw = nv_ck804_thaw,
+ .host_stop = nv_ck804_host_stop,
+ };
+
+-static struct ata_port_operations nv_adma_ops = {
++static const struct ata_port_operations nv_adma_ops = {
+ .inherits = &nv_ck804_ops,
+
+ .check_atapi_dma = nv_adma_check_atapi_dma,
+@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
+ .host_stop = nv_adma_host_stop,
+ };
+
+-static struct ata_port_operations nv_swncq_ops = {
++static const struct ata_port_operations nv_swncq_ops = {
+ .inherits = &nv_generic_ops,
+
+ .qc_defer = ata_std_qc_defer,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_promise.c linux-2.6.31.7/drivers/ata/sata_promise.c
+--- linux-2.6.31.7/drivers/ata/sata_promise.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_promise.c 2009-12-08 17:39:43.148686753 -0500
+@@ -184,7 +184,7 @@ static const struct ata_port_operations
+ .error_handler = pdc_error_handler,
+ };
+
+-static struct ata_port_operations pdc_sata_ops = {
++static const struct ata_port_operations pdc_sata_ops = {
+ .inherits = &pdc_common_ops,
+ .cable_detect = pdc_sata_cable_detect,
+ .freeze = pdc_sata_freeze,
+@@ -196,12 +196,12 @@ static struct ata_port_operations pdc_sa
+ };
+
+ /* First-generation chips need a more restrictive ->check_atapi_dma op */
+-static struct ata_port_operations pdc_old_sata_ops = {
++static const struct ata_port_operations pdc_old_sata_ops = {
+ .inherits = &pdc_sata_ops,
+ .check_atapi_dma = pdc_old_sata_check_atapi_dma,
+ };
+
+-static struct ata_port_operations pdc_pata_ops = {
++static const struct ata_port_operations pdc_pata_ops = {
+ .inherits = &pdc_common_ops,
+ .cable_detect = pdc_pata_cable_detect,
+ .freeze = pdc_freeze,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_qstor.c linux-2.6.31.7/drivers/ata/sata_qstor.c
+--- linux-2.6.31.7/drivers/ata/sata_qstor.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_qstor.c 2009-12-08 17:39:43.148686753 -0500
+@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
+ .dma_boundary = QS_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations qs_ata_ops = {
++static const struct ata_port_operations qs_ata_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .check_atapi_dma = qs_check_atapi_dma,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_sil24.c linux-2.6.31.7/drivers/ata/sata_sil24.c
+--- linux-2.6.31.7/drivers/ata/sata_sil24.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_sil24.c 2009-12-08 17:39:43.151662148 -0500
+@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ };
+
+-static struct ata_port_operations sil24_ops = {
++static const struct ata_port_operations sil24_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_defer = sil24_qc_defer,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_sil.c linux-2.6.31.7/drivers/ata/sata_sil.c
+--- linux-2.6.31.7/drivers/ata/sata_sil.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_sil.c 2009-12-08 17:39:43.150674398 -0500
+@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
+ .sg_tablesize = ATA_MAX_PRD
+ };
+
+-static struct ata_port_operations sil_ops = {
++static const struct ata_port_operations sil_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .dev_config = sil_dev_config,
+ .set_mode = sil_set_mode,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_sis.c linux-2.6.31.7/drivers/ata/sata_sis.c
+--- linux-2.6.31.7/drivers/ata/sata_sis.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_sis.c 2009-12-08 17:39:43.151662148 -0500
+@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations sis_ops = {
++static const struct ata_port_operations sis_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .scr_read = sis_scr_read,
+ .scr_write = sis_scr_write,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_svw.c linux-2.6.31.7/drivers/ata/sata_svw.c
+--- linux-2.6.31.7/drivers/ata/sata_svw.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_svw.c 2009-12-08 17:39:43.152688471 -0500
+@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
+ };
+
+
+-static struct ata_port_operations k2_sata_ops = {
++static const struct ata_port_operations k2_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .sff_tf_load = k2_sata_tf_load,
+ .sff_tf_read = k2_sata_tf_read,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_sx4.c linux-2.6.31.7/drivers/ata/sata_sx4.c
+--- linux-2.6.31.7/drivers/ata/sata_sx4.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_sx4.c 2009-12-08 17:39:43.153682729 -0500
+@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
+ };
+
+ /* TODO: inherit from base port_ops after converting to new EH */
+-static struct ata_port_operations pdc_20621_ops = {
++static const struct ata_port_operations pdc_20621_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .check_atapi_dma = pdc_check_atapi_dma,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_uli.c linux-2.6.31.7/drivers/ata/sata_uli.c
+--- linux-2.6.31.7/drivers/ata/sata_uli.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_uli.c 2009-12-08 17:39:43.153682729 -0500
+@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations uli_ops = {
++static const struct ata_port_operations uli_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .scr_read = uli_scr_read,
+ .scr_write = uli_scr_write,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_via.c linux-2.6.31.7/drivers/ata/sata_via.c
+--- linux-2.6.31.7/drivers/ata/sata_via.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_via.c 2009-12-08 17:39:43.154687833 -0500
+@@ -112,31 +112,31 @@ static struct scsi_host_template svia_sh
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations svia_base_ops = {
++static const struct ata_port_operations svia_base_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .sff_tf_load = svia_tf_load,
+ };
+
+-static struct ata_port_operations vt6420_sata_ops = {
++static const struct ata_port_operations vt6420_sata_ops = {
+ .inherits = &svia_base_ops,
+ .freeze = svia_noop_freeze,
+ .prereset = vt6420_prereset,
+ };
+
+-static struct ata_port_operations vt6421_pata_ops = {
++static const struct ata_port_operations vt6421_pata_ops = {
+ .inherits = &svia_base_ops,
+ .cable_detect = vt6421_pata_cable_detect,
+ .set_piomode = vt6421_set_pio_mode,
+ .set_dmamode = vt6421_set_dma_mode,
+ };
+
+-static struct ata_port_operations vt6421_sata_ops = {
++static const struct ata_port_operations vt6421_sata_ops = {
+ .inherits = &svia_base_ops,
+ .scr_read = svia_scr_read,
+ .scr_write = svia_scr_write,
+ };
+
+-static struct ata_port_operations vt8251_ops = {
++static const struct ata_port_operations vt8251_ops = {
+ .inherits = &svia_base_ops,
+ .hardreset = sata_std_hardreset,
+ .scr_read = vt8251_scr_read,
+diff -urNp linux-2.6.31.7/drivers/ata/sata_vsc.c linux-2.6.31.7/drivers/ata/sata_vsc.c
+--- linux-2.6.31.7/drivers/ata/sata_vsc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ata/sata_vsc.c 2009-12-08 17:39:43.155687731 -0500
+@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
+ };
+
+
+-static struct ata_port_operations vsc_sata_ops = {
++static const struct ata_port_operations vsc_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ /* The IRQ handling is not quite standard SFF behaviour so we
+ cannot use the default lost interrupt handler */
+diff -urNp linux-2.6.31.7/drivers/atm/adummy.c linux-2.6.31.7/drivers/atm/adummy.c
+--- linux-2.6.31.7/drivers/atm/adummy.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/adummy.c 2009-12-08 17:39:43.155687731 -0500
+@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+diff -urNp linux-2.6.31.7/drivers/atm/ambassador.c linux-2.6.31.7/drivers/atm/ambassador.c
+--- linux-2.6.31.7/drivers/atm/ambassador.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/ambassador.c 2009-12-08 17:39:43.160585264 -0500
+@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
+ PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
+
+ // VC layer stats
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ // free the descriptor
+ kfree (tx_descr);
+@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
+ dump_skb ("<<<", vc, skb);
+
+ // VC layer stats
+- atomic_inc(&atm_vcc->stats->rx);
++ atomic_inc_unchecked(&atm_vcc->stats->rx);
+ __net_timestamp(skb);
+ // end of our responsability
+ atm_vcc->push (atm_vcc, skb);
+@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
+ } else {
+ PRINTK (KERN_INFO, "dropped over-size frame");
+ // should we count this?
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ }
+
+ } else {
+@@ -1349,7 +1349,7 @@ static int amb_send (struct atm_vcc * at
+ }
+
+ if (check_area (skb->data, skb->len)) {
+- atomic_inc(&atm_vcc->stats->tx_err);
++ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
+ return -ENOMEM; // ?
+ }
+
+diff -urNp linux-2.6.31.7/drivers/atm/atmtcp.c linux-2.6.31.7/drivers/atm/atmtcp.c
+--- linux-2.6.31.7/drivers/atm/atmtcp.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/atmtcp.c 2009-12-08 17:39:43.171900169 -0500
+@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+ if (dev_data) return 0;
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOLINK;
+ }
+ size = skb->len+sizeof(struct atmtcp_hdr);
+@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
+ if (!new_skb) {
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOBUFS;
+ }
+ hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
+@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+ out_vcc->push(out_vcc,new_skb);
+- atomic_inc(&vcc->stats->tx);
+- atomic_inc(&out_vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->tx);
++ atomic_inc_unchecked(&out_vcc->stats->rx);
+ return 0;
+ }
+
+@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
+ out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
+ read_unlock(&vcc_sklist_lock);
+ if (!out_vcc) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ goto done;
+ }
+ skb_pull(skb,sizeof(struct atmtcp_hdr));
+@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
+ __net_timestamp(new_skb);
+ skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
+ out_vcc->push(out_vcc,new_skb);
+- atomic_inc(&vcc->stats->tx);
+- atomic_inc(&out_vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->tx);
++ atomic_inc_unchecked(&out_vcc->stats->rx);
+ done:
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+diff -urNp linux-2.6.31.7/drivers/atm/eni.c linux-2.6.31.7/drivers/atm/eni.c
+--- linux-2.6.31.7/drivers/atm/eni.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/eni.c 2009-12-08 17:39:43.187960238 -0500
+@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
+ DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
+ vcc->dev->number);
+ length = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ else {
+ length = ATM_CELL_SIZE-1; /* no HEC */
+@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+ size);
+ }
+ eff = length = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ else {
+ size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
+@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+ "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
+ vcc->dev->number,vcc->vci,length,size << 2,descr);
+ length = eff = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ }
+ skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
+@@ -770,7 +770,7 @@ rx_dequeued++;
+ vcc->push(vcc,skb);
+ pushed++;
+ }
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ wake_up(&eni_dev->rx_wait);
+ }
+@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
+ PCI_DMA_TODEVICE);
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb_irq(skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ wake_up(&eni_dev->tx_wait);
+ dma_complete++;
+ }
+diff -urNp linux-2.6.31.7/drivers/atm/firestream.c linux-2.6.31.7/drivers/atm/firestream.c
+--- linux-2.6.31.7/drivers/atm/firestream.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/firestream.c 2009-12-08 17:39:43.188698434 -0500
+@@ -748,7 +748,7 @@ static void process_txdone_queue (struct
+ }
+ }
+
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ fs_dprintk (FS_DEBUG_TXMEM, "i");
+ fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
+@@ -815,7 +815,7 @@ static void process_incoming (struct fs_
+ #endif
+ skb_put (skb, qe->p1 & 0xffff);
+ ATM_SKB(skb)->vcc = atm_vcc;
+- atomic_inc(&atm_vcc->stats->rx);
++ atomic_inc_unchecked(&atm_vcc->stats->rx);
+ __net_timestamp(skb);
+ fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
+ atm_vcc->push (atm_vcc, skb);
+@@ -836,12 +836,12 @@ static void process_incoming (struct fs_
+ kfree (pe);
+ }
+ if (atm_vcc)
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ break;
+ case 0x1f: /* Reassembly abort: no buffers. */
+ /* Silently increment error counter. */
+ if (atm_vcc)
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ break;
+ default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
+ printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
+diff -urNp linux-2.6.31.7/drivers/atm/fore200e.c linux-2.6.31.7/drivers/atm/fore200e.c
+--- linux-2.6.31.7/drivers/atm/fore200e.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/fore200e.c 2009-12-08 17:39:43.193692554 -0500
+@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
+ #endif
+ /* check error condition */
+ if (*entry->status & STATUS_ERROR)
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ else
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ }
+ }
+
+@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
+ if (skb == NULL) {
+ DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
+
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return -ENOMEM;
+ }
+
+@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
+
+ dev_kfree_skb_any(skb);
+
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return -ENOMEM;
+ }
+
+ ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+
+@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
+ DPRINTK(2, "damaged PDU on %d.%d.%d\n",
+ fore200e->atm_dev->number,
+ entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ }
+
+@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
+ goto retry_here;
+ }
+
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+
+ fore200e->tx_sat++;
+ DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
+diff -urNp linux-2.6.31.7/drivers/atm/he.c linux-2.6.31.7/drivers/atm/he.c
+--- linux-2.6.31.7/drivers/atm/he.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/he.c 2009-12-08 17:39:43.194701373 -0500
+@@ -1728,7 +1728,7 @@ he_service_rbrq(struct he_dev *he_dev, i
+
+ if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
+ hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto return_host_buffers;
+ }
+
+@@ -1761,7 +1761,7 @@ he_service_rbrq(struct he_dev *he_dev, i
+ RBRQ_LEN_ERR(he_dev->rbrq_head)
+ ? "LEN_ERR" : "",
+ vcc->vpi, vcc->vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto return_host_buffers;
+ }
+
+@@ -1820,7 +1820,7 @@ he_service_rbrq(struct he_dev *he_dev, i
+ vcc->push(vcc, skb);
+ spin_lock(&he_dev->global_lock);
+
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ return_host_buffers:
+ ++pdus_assembled;
+@@ -2165,7 +2165,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
+ tpd->vcc->pop(tpd->vcc, tpd->skb);
+ else
+ dev_kfree_skb_any(tpd->skb);
+- atomic_inc(&tpd->vcc->stats->tx_err);
++ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
+ }
+ pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+ return;
+@@ -2577,7 +2577,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -EINVAL;
+ }
+
+@@ -2588,7 +2588,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -EINVAL;
+ }
+ #endif
+@@ -2600,7 +2600,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+ return -ENOMEM;
+ }
+@@ -2642,7 +2642,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+ return -ENOMEM;
+ }
+@@ -2673,7 +2673,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ __enqueue_tpd(he_dev, tpd, cid);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+diff -urNp linux-2.6.31.7/drivers/atm/horizon.c linux-2.6.31.7/drivers/atm/horizon.c
+--- linux-2.6.31.7/drivers/atm/horizon.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/horizon.c 2009-12-08 17:39:43.204691531 -0500
+@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
+ {
+ struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
+ // VC layer stats
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ __net_timestamp(skb);
+ // end of our responsability
+ vcc->push (vcc, skb);
+@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
+ dev->tx_iovec = NULL;
+
+ // VC layer stats
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ // free the skb
+ hrz_kfree_skb (skb);
+diff -urNp linux-2.6.31.7/drivers/atm/idt77252.c linux-2.6.31.7/drivers/atm/idt77252.c
+--- linux-2.6.31.7/drivers/atm/idt77252.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/idt77252.c 2009-12-08 17:39:43.213544919 -0500
+@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
+ else
+ dev_kfree_skb(skb);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ }
+
+ atomic_dec(&scq->used);
+@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
+ if ((sb = dev_alloc_skb(64)) == NULL) {
+ printk("%s: Can't allocate buffers for aal0.\n",
+ card->name);
+- atomic_add(i, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i, &vcc->stats->rx_drop);
+ break;
+ }
+ if (!atm_charge(vcc, sb->truesize)) {
+ RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
+ card->name);
+- atomic_add(i - 1, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
+ dev_kfree_skb(sb);
+ break;
+ }
+@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ cell += ATM_CELL_PAYLOAD;
+ }
+@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
+ "(CDC: %08x)\n",
+ card->name, len, rpp->len, readl(SAR_REG_CDC));
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (stat & SAR_RSQE_CRC) {
+ RXPRINTK("%s: AAL5 CRC error.\n", card->name);
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (skb_queue_len(&rpp->queue) > 1) {
+@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ RXPRINTK("%s: Can't alloc RX skb.\n",
+ card->name);
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (!atm_charge(vcc, skb->truesize)) {
+@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ __net_timestamp(skb);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ return;
+ }
+@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ __net_timestamp(skb);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ if (skb->truesize > SAR_FB_SIZE_3)
+ add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
+@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
+ if (vcc->qos.aal != ATM_AAL0) {
+ RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
+ card->name, vpi, vci);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto drop;
+ }
+
+ if ((sb = dev_alloc_skb(64)) == NULL) {
+ printk("%s: Can't allocate buffers for AAL0.\n",
+ card->name);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto drop;
+ }
+
+@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ drop:
+ skb_pull(queue, 64);
+@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
+
+ if (vc == NULL) {
+ printk("%s: NULL connection in send().\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+ if (!test_bit(VCF_TX, &vc->flags)) {
+ printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
+ break;
+ default:
+ printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ printk("%s: No scatter-gather yet.\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
+
+ err = queue_skb(card, vc, skb, oam);
+ if (err) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return err;
+ }
+@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
+ skb = dev_alloc_skb(64);
+ if (!skb) {
+ printk("%s: Out of memory in send_oam().\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOMEM;
+ }
+ atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+diff -urNp linux-2.6.31.7/drivers/atm/iphase.c linux-2.6.31.7/drivers/atm/iphase.c
+--- linux-2.6.31.7/drivers/atm/iphase.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/iphase.c 2009-12-08 17:39:43.226693793 -0500
+@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
+ status = (u_short) (buf_desc_ptr->desc_mode);
+ if (status & (RX_CER | RX_PTE | RX_OFL))
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ IF_ERR(printk("IA: bad packet, dropping it");)
+ if (status & RX_CER) {
+ IF_ERR(printk(" cause: packet CRC error\n");)
+@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
+ len = dma_addr - buf_addr;
+ if (len > iadev->rx_buf_sz) {
+ printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out_free_desc;
+ }
+
+@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
+ ia_vcc = INPH_IA_VCC(vcc);
+ if (ia_vcc == NULL)
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ dev_kfree_skb_any(skb);
+ atm_return(vcc, atm_guess_pdu2truesize(len));
+ goto INCR_DLE;
+@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
+ if ((length > iadev->rx_buf_sz) || (length >
+ (skb->len - sizeof(struct cpcs_trailer))))
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
+ length, skb->len);)
+ dev_kfree_skb_any(skb);
+@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
+
+ IF_RX(printk("rx_dle_intr: skb push");)
+ vcc->push(vcc,skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ iadev->rx_pkt_cnt++;
+ }
+ INCR_DLE:
+@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
+ {
+ struct k_sonet_stats *stats;
+ stats = &PRIV(_ia_dev[board])->sonet_stats;
+- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
+- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
+- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
+- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
+- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
+- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
+- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
+- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
+- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
++ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
++ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
++ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
++ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
++ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
++ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
++ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
++ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
++ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
+ }
+ ia_cmds.status = 0;
+ break;
+@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
+ if ((desc == 0) || (desc > iadev->num_tx_desc))
+ {
+ IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ if (vcc->pop)
+ vcc->pop(vcc, skb);
+ else
+@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
+ ATM_DESC(skb) = vcc->vci;
+ skb_queue_tail(&iadev->tx_dma_q, skb);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ iadev->tx_pkt_cnt++;
+ /* Increment transaction counter */
+ writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
+
+ #if 0
+ /* add flow control logic */
+- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
++ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
+ if (iavcc->vc_desc_cnt > 10) {
+ vcc->tx_quota = vcc->tx_quota * 3 / 4;
+ printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
+diff -urNp linux-2.6.31.7/drivers/atm/lanai.c linux-2.6.31.7/drivers/atm/lanai.c
+--- linux-2.6.31.7/drivers/atm/lanai.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/lanai.c 2009-12-08 17:39:43.232698080 -0500
+@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
+ vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
+ lanai_endtx(lanai, lvcc);
+ lanai_free_skb(lvcc->tx.atmvcc, skb);
+- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
++ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
+ }
+
+ /* Try to fill the buffer - don't call unless there is backlog */
+@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
+ ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
+ __net_timestamp(skb);
+ lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
+ out:
+ lvcc->rx.buf.ptr = end;
+ cardvcc_write(lvcc, endptr, vcc_rxreadptr);
+@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
+ DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
+ "vcc %d\n", lanai->number, (unsigned int) s, vci);
+ lanai->stats.service_rxnotaal5++;
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ return 0;
+ }
+ if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
+@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
+ int bytes;
+ read_unlock(&vcc_sklist_lock);
+ DPRINTK("got trashed rx pdu on vci %d\n", vci);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_trash++;
+ bytes = (SERVICE_GET_END(s) * 16) -
+ (((unsigned long) lvcc->rx.buf.ptr) -
+@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
+ }
+ if (s & SERVICE_STREAM) {
+ read_unlock(&vcc_sklist_lock);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_stream++;
+ printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
+ "PDU on VCI %d!\n", lanai->number, vci);
+@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
+ return 0;
+ }
+ DPRINTK("got rx crc error on vci %d\n", vci);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_rxcrc++;
+ lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
+ cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
+diff -urNp linux-2.6.31.7/drivers/atm/nicstar.c linux-2.6.31.7/drivers/atm/nicstar.c
+--- linux-2.6.31.7/drivers/atm/nicstar.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/nicstar.c 2009-12-08 17:39:43.234700892 -0500
+@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
+ if ((vc = (vc_map *) vcc->dev_data) == NULL)
+ {
+ printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
+ if (!vc->tx)
+ {
+ printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
+ if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
+ {
+ printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
+ if (skb_shinfo(skb)->nr_frags != 0)
+ {
+ printk("nicstar%d: No scatter-gather yet.\n", card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
+
+ if (push_scqe(card, vc, scq, &scqe, skb) != 0)
+ {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
+ {
+ printk("nicstar%d: Can't allocate buffers for aal0.\n",
+ card->index);
+- atomic_add(i,&vcc->stats->rx_drop);
++ atomic_add_unchecked(i,&vcc->stats->rx_drop);
+ break;
+ }
+ if (!atm_charge(vcc, sb->truesize))
+ {
+ RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
+ card->index);
+- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
++ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
+ dev_kfree_skb_any(sb);
+ break;
+ }
+@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ cell += ATM_CELL_PAYLOAD;
+ }
+
+@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ if (iovb == NULL)
+ {
+ printk("nicstar%d: Out of iovec buffers.\n", card->index);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ recycle_rx_buf(card, skb);
+ return;
+ }
+@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
+ {
+ printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
+ NS_SKB(iovb)->iovcnt = 0;
+ iovb->len = 0;
+@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ printk("nicstar%d: Expected a small buffer, and this is not one.\n",
+ card->index);
+ which_list(card, skb);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_rx_buf(card, skb);
+ vc->rx_iov = NULL;
+ recycle_iov_buf(card, iovb);
+@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ printk("nicstar%d: Expected a large buffer, and this is not one.\n",
+ card->index);
+ which_list(card, skb);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
+ NS_SKB(iovb)->iovcnt);
+ vc->rx_iov = NULL;
+@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ printk(" - PDU size mismatch.\n");
+ else
+ printk(".\n");
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
+ NS_SKB(iovb)->iovcnt);
+ vc->rx_iov = NULL;
+@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ if (!atm_charge(vcc, skb->truesize))
+ {
+ push_rxbufs(card, skb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ }
+ else
+ {
+@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ }
+ else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
+@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ if (!atm_charge(vcc, sb->truesize))
+ {
+ push_rxbufs(card, sb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ }
+ else
+ {
+@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+
+ push_rxbufs(card, skb);
+@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ if (!atm_charge(vcc, skb->truesize))
+ {
+ push_rxbufs(card, skb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ }
+ else
+ {
+@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+
+ push_rxbufs(card, sb);
+@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ if (hb == NULL)
+ {
+ printk("nicstar%d: Out of huge buffers.\n", card->index);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
+ NS_SKB(iovb)->iovcnt);
+ vc->rx_iov = NULL;
+@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ }
+ else
+ dev_kfree_skb_any(hb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ }
+ else
+ {
+@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ #endif /* NS_USE_DESTRUCTORS */
+ __net_timestamp(hb);
+ vcc->push(vcc, hb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ }
+
+diff -urNp linux-2.6.31.7/drivers/atm/solos-pci.c linux-2.6.31.7/drivers/atm/solos-pci.c
+--- linux-2.6.31.7/drivers/atm/solos-pci.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/solos-pci.c 2009-12-08 17:39:43.235697690 -0500
+@@ -663,7 +663,7 @@ void solos_bh(unsigned long card_arg)
+ }
+ atm_charge(vcc, skb->truesize);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ break;
+
+ case PKT_STATUS:
+@@ -966,7 +966,7 @@ static uint32_t fpga_tx(struct solos_car
+ vcc = SKB_CB(oldskb)->vcc;
+
+ if (vcc) {
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ solos_pop(vcc, oldskb);
+ } else
+ dev_kfree_skb_irq(oldskb);
+diff -urNp linux-2.6.31.7/drivers/atm/suni.c linux-2.6.31.7/drivers/atm/suni.c
+--- linux-2.6.31.7/drivers/atm/suni.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/suni.c 2009-12-08 17:39:43.235697690 -0500
+@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
+
+
+ #define ADD_LIMITED(s,v) \
+- atomic_add((v),&stats->s); \
+- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
++ atomic_add_unchecked((v),&stats->s); \
++ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
+
+
+ static void suni_hz(unsigned long from_timer)
+diff -urNp linux-2.6.31.7/drivers/atm/uPD98402.c linux-2.6.31.7/drivers/atm/uPD98402.c
+--- linux-2.6.31.7/drivers/atm/uPD98402.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/uPD98402.c 2009-12-08 17:39:43.236696616 -0500
+@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
+ struct sonet_stats tmp;
+ int error = 0;
+
+- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
++ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
+ sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
+ if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
+ if (zero && !error) {
+@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
+
+
+ #define ADD_LIMITED(s,v) \
+- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
+- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
+- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
++ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
++ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
+
+
+ static void stat_event(struct atm_dev *dev)
+@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
+ if (reason & uPD98402_INT_PFM) stat_event(dev);
+ if (reason & uPD98402_INT_PCO) {
+ (void) GET(PCOCR); /* clear interrupt cause */
+- atomic_add(GET(HECCT),
++ atomic_add_unchecked(GET(HECCT),
+ &PRIV(dev)->sonet_stats.uncorr_hcs);
+ }
+ if ((reason & uPD98402_INT_RFO) &&
+@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
+ PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
+ uPD98402_INT_LOS),PIMR); /* enable them */
+ (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
+- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
+- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
+- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
+ return 0;
+ }
+
+diff -urNp linux-2.6.31.7/drivers/atm/zatm.c linux-2.6.31.7/drivers/atm/zatm.c
+--- linux-2.6.31.7/drivers/atm/zatm.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/atm/zatm.c 2009-12-08 17:39:43.236696616 -0500
+@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
+ }
+ if (!size) {
+ dev_kfree_skb_irq(skb);
+- if (vcc) atomic_inc(&vcc->stats->rx_err);
++ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
+ continue;
+ }
+ if (!atm_charge(vcc,skb->truesize)) {
+@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
+ skb->len = size;
+ ATM_SKB(skb)->vcc = vcc;
+ vcc->push(vcc,skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ zout(pos & 0xffff,MTA(mbx));
+ #if 0 /* probably a stupid idea */
+@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
+ skb_queue_head(&zatm_vcc->backlog,skb);
+ break;
+ }
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ wake_up(&zatm_vcc->tx_wait);
+ }
+
+diff -urNp linux-2.6.31.7/drivers/base/bus.c linux-2.6.31.7/drivers/base/bus.c
+--- linux-2.6.31.7/drivers/base/bus.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/base/bus.c 2009-12-08 17:39:43.237701084 -0500
+@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
+ return ret;
+ }
+
+-static struct sysfs_ops driver_sysfs_ops = {
++static const struct sysfs_ops driver_sysfs_ops = {
+ .show = drv_attr_show,
+ .store = drv_attr_store,
+ };
+@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
+ return ret;
+ }
+
+-static struct sysfs_ops bus_sysfs_ops = {
++static const struct sysfs_ops bus_sysfs_ops = {
+ .show = bus_attr_show,
+ .store = bus_attr_store,
+ };
+@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
+ return 0;
+ }
+
+-static struct kset_uevent_ops bus_uevent_ops = {
++static const struct kset_uevent_ops bus_uevent_ops = {
+ .filter = bus_uevent_filter,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/base/class.c linux-2.6.31.7/drivers/base/class.c
+--- linux-2.6.31.7/drivers/base/class.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/base/class.c 2009-12-08 17:39:43.237701084 -0500
+@@ -61,7 +61,7 @@ static void class_release(struct kobject
+ "be careful\n", class->name);
+ }
+
+-static struct sysfs_ops class_sysfs_ops = {
++static const struct sysfs_ops class_sysfs_ops = {
+ .show = class_attr_show,
+ .store = class_attr_store,
+ };
+diff -urNp linux-2.6.31.7/drivers/base/core.c linux-2.6.31.7/drivers/base/core.c
+--- linux-2.6.31.7/drivers/base/core.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/base/core.c 2009-12-08 17:39:43.248703024 -0500
+@@ -93,7 +93,7 @@ static ssize_t dev_attr_store(struct kob
+ return ret;
+ }
+
+-static struct sysfs_ops dev_sysfs_ops = {
++static const struct sysfs_ops dev_sysfs_ops = {
+ .show = dev_attr_show,
+ .store = dev_attr_store,
+ };
+@@ -242,7 +242,7 @@ static int dev_uevent(struct kset *kset,
+ return retval;
+ }
+
+-static struct kset_uevent_ops device_uevent_ops = {
++static const struct kset_uevent_ops device_uevent_ops = {
+ .filter = dev_uevent_filter,
+ .name = dev_uevent_name,
+ .uevent = dev_uevent,
+diff -urNp linux-2.6.31.7/drivers/base/memory.c linux-2.6.31.7/drivers/base/memory.c
+--- linux-2.6.31.7/drivers/base/memory.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/base/memory.c 2009-12-08 17:39:43.248703024 -0500
+@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
+ return retval;
+ }
+
+-static struct kset_uevent_ops memory_uevent_ops = {
++static const struct kset_uevent_ops memory_uevent_ops = {
+ .name = memory_uevent_name,
+ .uevent = memory_uevent,
+ };
+diff -urNp linux-2.6.31.7/drivers/base/sys.c linux-2.6.31.7/drivers/base/sys.c
+--- linux-2.6.31.7/drivers/base/sys.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/base/sys.c 2009-12-08 17:39:43.249904338 -0500
+@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
+ return -EIO;
+ }
+
+-static struct sysfs_ops sysfs_ops = {
++static const struct sysfs_ops sysfs_ops = {
+ .show = sysdev_show,
+ .store = sysdev_store,
+ };
+@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
+ return -EIO;
+ }
+
+-static struct sysfs_ops sysfs_class_ops = {
++static const struct sysfs_ops sysfs_class_ops = {
+ .show = sysdev_class_show,
+ .store = sysdev_class_store,
+ };
+diff -urNp linux-2.6.31.7/drivers/block/cciss.c linux-2.6.31.7/drivers/block/cciss.c
+--- linux-2.6.31.7/drivers/block/cciss.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/block/cciss.c 2009-12-08 17:39:43.257703130 -0500
+@@ -363,7 +363,7 @@ static void cciss_seq_stop(struct seq_fi
+ h->busy_configuring = 0;
+ }
+
+-static struct seq_operations cciss_seq_ops = {
++static const struct seq_operations cciss_seq_ops = {
+ .start = cciss_seq_start,
+ .show = cciss_seq_show,
+ .next = cciss_seq_next,
+@@ -426,7 +426,7 @@ out:
+ return err;
+ }
+
+-static struct file_operations cciss_proc_fops = {
++static const struct file_operations cciss_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = cciss_seq_open,
+ .read = seq_read,
+diff -urNp linux-2.6.31.7/drivers/block/pktcdvd.c linux-2.6.31.7/drivers/block/pktcdvd.c
+--- linux-2.6.31.7/drivers/block/pktcdvd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/block/pktcdvd.c 2009-12-08 17:39:43.258704388 -0500
+@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
+ return len;
+ }
+
+-static struct sysfs_ops kobj_pkt_ops = {
++static const struct sysfs_ops kobj_pkt_ops = {
+ .show = kobj_pkt_show,
+ .store = kobj_pkt_store
+ };
+diff -urNp linux-2.6.31.7/drivers/char/agp/agp.h linux-2.6.31.7/drivers/char/agp/agp.h
+--- linux-2.6.31.7/drivers/char/agp/agp.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/agp/agp.h 2009-12-08 17:39:43.258704388 -0500
+@@ -126,7 +126,7 @@ struct agp_bridge_driver {
+ struct agp_bridge_data {
+ const struct agp_version *version;
+ const struct agp_bridge_driver *driver;
+- struct vm_operations_struct *vm_ops;
++ const struct vm_operations_struct *vm_ops;
+ void *previous_size;
+ void *current_size;
+ void *dev_private_data;
+diff -urNp linux-2.6.31.7/drivers/char/agp/alpha-agp.c linux-2.6.31.7/drivers/char/agp/alpha-agp.c
+--- linux-2.6.31.7/drivers/char/agp/alpha-agp.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/agp/alpha-agp.c 2009-12-08 17:39:43.268801128 -0500
+@@ -40,7 +40,7 @@ static struct aper_size_info_fixed alpha
+ { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
+ };
+
+-struct vm_operations_struct alpha_core_agp_vm_ops = {
++const struct vm_operations_struct alpha_core_agp_vm_ops = {
+ .fault = alpha_core_agp_vm_fault,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/char/agp/frontend.c linux-2.6.31.7/drivers/char/agp/frontend.c
+--- linux-2.6.31.7/drivers/char/agp/frontend.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/agp/frontend.c 2009-12-08 17:39:43.271696434 -0500
+@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
+ if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
+ return -EFAULT;
+
+- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
++ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
+ return -EFAULT;
+
+ client = agp_find_client_by_pid(reserve.pid);
+diff -urNp linux-2.6.31.7/drivers/char/agp/intel-agp.c linux-2.6.31.7/drivers/char/agp/intel-agp.c
+--- linux-2.6.31.7/drivers/char/agp/intel-agp.c 2009-12-08 17:29:51.590237316 -0500
++++ linux-2.6.31.7/drivers/char/agp/intel-agp.c 2009-12-08 17:42:34.776662214 -0500
+@@ -2424,7 +2424,7 @@ static struct pci_device_id agp_intel_pc
+ ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
+ ID(PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB),
+- { }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
+diff -urNp linux-2.6.31.7/drivers/char/apm-emulation.c linux-2.6.31.7/drivers/char/apm-emulation.c
+--- linux-2.6.31.7/drivers/char/apm-emulation.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/apm-emulation.c 2009-12-08 17:39:43.302552187 -0500
+@@ -393,7 +393,7 @@ static int apm_open(struct inode * inode
+ return as ? 0 : -ENOMEM;
+ }
+
+-static struct file_operations apm_bios_fops = {
++static const struct file_operations apm_bios_fops = {
+ .owner = THIS_MODULE,
+ .read = apm_read,
+ .poll = apm_poll,
+diff -urNp linux-2.6.31.7/drivers/char/bfin-otp.c linux-2.6.31.7/drivers/char/bfin-otp.c
+--- linux-2.6.31.7/drivers/char/bfin-otp.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/bfin-otp.c 2009-12-08 17:39:43.302552187 -0500
+@@ -133,7 +133,7 @@ static ssize_t bfin_otp_write(struct fil
+ # define bfin_otp_write NULL
+ #endif
+
+-static struct file_operations bfin_otp_fops = {
++static const struct file_operations bfin_otp_fops = {
+ .owner = THIS_MODULE,
+ .read = bfin_otp_read,
+ .write = bfin_otp_write,
+diff -urNp linux-2.6.31.7/drivers/char/hpet.c linux-2.6.31.7/drivers/char/hpet.c
+--- linux-2.6.31.7/drivers/char/hpet.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/hpet.c 2009-12-08 17:39:43.310735720 -0500
+@@ -995,7 +995,7 @@ static struct acpi_driver hpet_acpi_driv
+ },
+ };
+
+-static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
++static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops, {NULL, NULL}, NULL, NULL };
+
+ static int __init hpet_init(void)
+ {
+diff -urNp linux-2.6.31.7/drivers/char/hvc_beat.c linux-2.6.31.7/drivers/char/hvc_beat.c
+--- linux-2.6.31.7/drivers/char/hvc_beat.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/hvc_beat.c 2009-12-08 17:39:43.310735720 -0500
+@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
+ return cnt;
+ }
+
+-static struct hv_ops hvc_beat_get_put_ops = {
++static const struct hv_ops hvc_beat_get_put_ops = {
+ .get_chars = hvc_beat_get_chars,
+ .put_chars = hvc_beat_put_chars,
+ };
+diff -urNp linux-2.6.31.7/drivers/char/hvc_console.c linux-2.6.31.7/drivers/char/hvc_console.c
+--- linux-2.6.31.7/drivers/char/hvc_console.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/hvc_console.c 2009-12-08 17:39:43.316642867 -0500
+@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
+ * console interfaces but can still be used as a tty device. This has to be
+ * static because kmalloc will not work during early console init.
+ */
+-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
++static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
+ static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
+ {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
+
+@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
+ * vty adapters do NOT get an hvc_instantiate() callback since they
+ * appear after early console init.
+ */
+-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
++int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
+ {
+ struct hvc_struct *hp;
+
+@@ -751,7 +751,7 @@ static const struct tty_operations hvc_o
+ };
+
+ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
+- struct hv_ops *ops, int outbuf_size)
++ const struct hv_ops *ops, int outbuf_size)
+ {
+ struct hvc_struct *hp;
+ int i;
+diff -urNp linux-2.6.31.7/drivers/char/hvc_console.h linux-2.6.31.7/drivers/char/hvc_console.h
+--- linux-2.6.31.7/drivers/char/hvc_console.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/hvc_console.h 2009-12-08 17:39:43.316642867 -0500
+@@ -54,7 +54,7 @@ struct hvc_struct {
+ int outbuf_size;
+ int n_outbuf;
+ uint32_t vtermno;
+- struct hv_ops *ops;
++ const struct hv_ops *ops;
+ int irq_requested;
+ int data;
+ struct winsize ws;
+@@ -75,11 +75,11 @@ struct hv_ops {
+ };
+
+ /* Register a vterm and a slot index for use as a console (console_init) */
+-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
++extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
+
+ /* register a vterm for hvc tty operation (module_init or hotplug add) */
+ extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
+- struct hv_ops *ops, int outbuf_size);
++ const struct hv_ops *ops, int outbuf_size);
+ /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
+ extern int hvc_remove(struct hvc_struct *hp);
+
+diff -urNp linux-2.6.31.7/drivers/char/hvc_iseries.c linux-2.6.31.7/drivers/char/hvc_iseries.c
+--- linux-2.6.31.7/drivers/char/hvc_iseries.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/hvc_iseries.c 2009-12-08 17:39:43.317674239 -0500
+@@ -197,7 +197,7 @@ done:
+ return sent;
+ }
+
+-static struct hv_ops hvc_get_put_ops = {
++static const struct hv_ops hvc_get_put_ops = {
+ .get_chars = get_chars,
+ .put_chars = put_chars,
+ .notifier_add = notifier_add_irq,
+diff -urNp linux-2.6.31.7/drivers/char/hvc_iucv.c linux-2.6.31.7/drivers/char/hvc_iucv.c
+--- linux-2.6.31.7/drivers/char/hvc_iucv.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/hvc_iucv.c 2009-12-08 17:39:43.318660755 -0500
+@@ -920,7 +920,7 @@ static int hvc_iucv_pm_restore_thaw(stru
+
+
+ /* HVC operations */
+-static struct hv_ops hvc_iucv_ops = {
++static const struct hv_ops hvc_iucv_ops = {
+ .get_chars = hvc_iucv_get_chars,
+ .put_chars = hvc_iucv_put_chars,
+ .notifier_add = hvc_iucv_notifier_add,
+diff -urNp linux-2.6.31.7/drivers/char/hvc_rtas.c linux-2.6.31.7/drivers/char/hvc_rtas.c
+--- linux-2.6.31.7/drivers/char/hvc_rtas.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/hvc_rtas.c 2009-12-08 17:39:43.318660755 -0500
+@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
+ return i;
+ }
+
+-static struct hv_ops hvc_rtas_get_put_ops = {
++static const struct hv_ops hvc_rtas_get_put_ops = {
+ .get_chars = hvc_rtas_read_console,
+ .put_chars = hvc_rtas_write_console,
+ };
+diff -urNp linux-2.6.31.7/drivers/char/hvcs.c linux-2.6.31.7/drivers/char/hvcs.c
+--- linux-2.6.31.7/drivers/char/hvcs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/hvcs.c 2009-12-08 17:39:43.325585361 -0500
+@@ -269,7 +269,7 @@ struct hvcs_struct {
+ unsigned int index;
+
+ struct tty_struct *tty;
+- int open_count;
++ atomic_t open_count;
+
+ /*
+ * Used to tell the driver kernel_thread what operations need to take
+@@ -419,7 +419,7 @@ static ssize_t hvcs_vterm_state_store(st
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+
+- if (hvcsd->open_count > 0) {
++ if (atomic_read(&hvcsd->open_count) > 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ printk(KERN_INFO "HVCS: vterm state unchanged. "
+ "The hvcs device node is still in use.\n");
+@@ -1135,7 +1135,7 @@ static int hvcs_open(struct tty_struct *
+ if ((retval = hvcs_partner_connect(hvcsd)))
+ goto error_release;
+
+- hvcsd->open_count = 1;
++ atomic_set(&hvcsd->open_count, 1);
+ hvcsd->tty = tty;
+ tty->driver_data = hvcsd;
+
+@@ -1169,7 +1169,7 @@ fast_open:
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ kref_get(&hvcsd->kref);
+- hvcsd->open_count++;
++ atomic_inc(&hvcsd->open_count);
+ hvcsd->todo_mask |= HVCS_SCHED_READ;
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+
+@@ -1213,7 +1213,7 @@ static void hvcs_close(struct tty_struct
+ hvcsd = tty->driver_data;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+- if (--hvcsd->open_count == 0) {
++ if (atomic_dec_and_test(&hvcsd->open_count)) {
+
+ vio_disable_interrupts(hvcsd->vdev);
+
+@@ -1239,10 +1239,10 @@ static void hvcs_close(struct tty_struct
+ free_irq(irq, hvcsd);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ return;
+- } else if (hvcsd->open_count < 0) {
++ } else if (atomic_read(&hvcsd->open_count) < 0) {
+ printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
+ " is missmanaged.\n",
+- hvcsd->vdev->unit_address, hvcsd->open_count);
++ hvcsd->vdev->unit_address, atomic_read(&hvcsd->open_count));
+ }
+
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+@@ -1258,7 +1258,7 @@ static void hvcs_hangup(struct tty_struc
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ /* Preserve this so that we know how many kref refs to put */
+- temp_open_count = hvcsd->open_count;
++ temp_open_count = atomic_read(&hvcsd->open_count);
+
+ /*
+ * Don't kref put inside the spinlock because the destruction
+@@ -1273,7 +1273,7 @@ static void hvcs_hangup(struct tty_struc
+ hvcsd->tty->driver_data = NULL;
+ hvcsd->tty = NULL;
+
+- hvcsd->open_count = 0;
++ atomic_set(&hvcsd->open_count, 0);
+
+ /* This will drop any buffered data on the floor which is OK in a hangup
+ * scenario. */
+@@ -1344,7 +1344,7 @@ static int hvcs_write(struct tty_struct
+ * the middle of a write operation? This is a crummy place to do this
+ * but we want to keep it all in the spinlock.
+ */
+- if (hvcsd->open_count <= 0) {
++ if (atomic_read(&hvcsd->open_count) <= 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return -ENODEV;
+ }
+@@ -1418,7 +1418,7 @@ static int hvcs_write_room(struct tty_st
+ {
+ struct hvcs_struct *hvcsd = tty->driver_data;
+
+- if (!hvcsd || hvcsd->open_count <= 0)
++ if (!hvcsd || atomic_read(&hvcsd->open_count) <= 0)
+ return 0;
+
+ return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
+diff -urNp linux-2.6.31.7/drivers/char/hvc_udbg.c linux-2.6.31.7/drivers/char/hvc_udbg.c
+--- linux-2.6.31.7/drivers/char/hvc_udbg.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/hvc_udbg.c 2009-12-08 17:39:43.319709790 -0500
+@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
+ return i;
+ }
+
+-static struct hv_ops hvc_udbg_ops = {
++static const struct hv_ops hvc_udbg_ops = {
+ .get_chars = hvc_udbg_get,
+ .put_chars = hvc_udbg_put,
+ };
+diff -urNp linux-2.6.31.7/drivers/char/hvc_vio.c linux-2.6.31.7/drivers/char/hvc_vio.c
+--- linux-2.6.31.7/drivers/char/hvc_vio.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/hvc_vio.c 2009-12-08 17:39:43.319709790 -0500
+@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
+ return got;
+ }
+
+-static struct hv_ops hvc_get_put_ops = {
++static const struct hv_ops hvc_get_put_ops = {
+ .get_chars = filtered_get_chars,
+ .put_chars = hvc_put_chars,
+ .notifier_add = notifier_add_irq,
+diff -urNp linux-2.6.31.7/drivers/char/hvc_xen.c linux-2.6.31.7/drivers/char/hvc_xen.c
+--- linux-2.6.31.7/drivers/char/hvc_xen.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/hvc_xen.c 2009-12-08 17:39:43.320657864 -0500
+@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
+ return recv;
+ }
+
+-static struct hv_ops hvc_ops = {
++static const struct hv_ops hvc_ops = {
+ .get_chars = read_console,
+ .put_chars = write_console,
+ .notifier_add = notifier_add_irq,
+diff -urNp linux-2.6.31.7/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.31.7/drivers/char/ipmi/ipmi_msghandler.c
+--- linux-2.6.31.7/drivers/char/ipmi/ipmi_msghandler.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/ipmi/ipmi_msghandler.c 2009-12-08 17:39:43.347649495 -0500
+@@ -413,7 +413,7 @@ struct ipmi_smi {
+ struct proc_dir_entry *proc_dir;
+ char proc_dir_name[10];
+
+- atomic_t stats[IPMI_NUM_STATS];
++ atomic_unchecked_t stats[IPMI_NUM_STATS];
+
+ /*
+ * run_to_completion duplicate of smb_info, smi_info
+@@ -446,9 +446,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
+
+
+ #define ipmi_inc_stat(intf, stat) \
+- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
++ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
+ #define ipmi_get_stat(intf, stat) \
+- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
++ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
+
+ static int is_lan_addr(struct ipmi_addr *addr)
+ {
+@@ -2807,7 +2807,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
+ INIT_LIST_HEAD(&intf->cmd_rcvrs);
+ init_waitqueue_head(&intf->waitq);
+ for (i = 0; i < IPMI_NUM_STATS; i++)
+- atomic_set(&intf->stats[i], 0);
++ atomic_set_unchecked(&intf->stats[i], 0);
+
+ intf->proc_dir = NULL;
+
+diff -urNp linux-2.6.31.7/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.31.7/drivers/char/ipmi/ipmi_si_intf.c
+--- linux-2.6.31.7/drivers/char/ipmi/ipmi_si_intf.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/ipmi/ipmi_si_intf.c 2009-12-08 17:39:43.348709894 -0500
+@@ -277,7 +277,7 @@ struct smi_info {
+ unsigned char slave_addr;
+
+ /* Counters and things for the proc filesystem. */
+- atomic_t stats[SI_NUM_STATS];
++ atomic_unchecked_t stats[SI_NUM_STATS];
+
+ struct task_struct *thread;
+
+@@ -285,9 +285,9 @@ struct smi_info {
+ };
+
+ #define smi_inc_stat(smi, stat) \
+- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
++ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
+ #define smi_get_stat(smi, stat) \
+- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
++ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
+
+ #define SI_MAX_PARMS 4
+
+@@ -2926,7 +2926,7 @@ static int try_smi_init(struct smi_info
+ atomic_set(&new_smi->req_events, 0);
+ new_smi->run_to_completion = 0;
+ for (i = 0; i < SI_NUM_STATS; i++)
+- atomic_set(&new_smi->stats[i], 0);
++ atomic_set_unchecked(&new_smi->stats[i], 0);
+
+ new_smi->interrupt_disabled = 0;
+ atomic_set(&new_smi->stop_operation, 0);
+diff -urNp linux-2.6.31.7/drivers/char/keyboard.c linux-2.6.31.7/drivers/char/keyboard.c
+--- linux-2.6.31.7/drivers/char/keyboard.c 2009-12-08 17:29:51.591012400 -0500
++++ linux-2.6.31.7/drivers/char/keyboard.c 2009-12-08 17:39:43.352706833 -0500
+@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
+ kbd->kbdmode == VC_MEDIUMRAW) &&
+ value != KVAL(K_SAK))
+ return; /* SAK is allowed even in raw mode */
++
++#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
++ {
++ void *func = fn_handler[value];
++ if (func == fn_show_state || func == fn_show_ptregs ||
++ func == fn_show_mem)
++ return;
++ }
++#endif
++
+ fn_handler[value](vc);
+ }
+
+@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
+ .evbit = { BIT_MASK(EV_SND) },
+ },
+
+- { }, /* Terminating entry */
++ { 0 }, /* Terminating entry */
+ };
+
+ MODULE_DEVICE_TABLE(input, kbd_ids);
+diff -urNp linux-2.6.31.7/drivers/char/mem.c linux-2.6.31.7/drivers/char/mem.c
+--- linux-2.6.31.7/drivers/char/mem.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/mem.c 2009-12-08 17:39:43.352706833 -0500
+@@ -18,6 +18,7 @@
+ #include <linux/raw.h>
+ #include <linux/tty.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/ptrace.h>
+ #include <linux/device.h>
+ #include <linux/highmem.h>
+@@ -35,6 +36,10 @@
+ # include <linux/efi.h>
+ #endif
+
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++extern struct file_operations grsec_fops;
++#endif
++
+ /*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+@@ -192,6 +197,11 @@ static ssize_t write_mem(struct file * f
+ if (!valid_phys_addr_range(p, count))
+ return -EFAULT;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_mem_write();
++ return -EPERM;
++#endif
++
+ written = 0;
+
+ #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
+@@ -301,7 +311,7 @@ static inline int private_mapping_ok(str
+ }
+ #endif
+
+-static struct vm_operations_struct mmap_mem_ops = {
++static const struct vm_operations_struct mmap_mem_ops = {
+ #ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys
+ #endif
+@@ -324,6 +334,11 @@ static int mmap_mem(struct file * file,
+ &vma->vm_page_prot))
+ return -EINVAL;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ if (gr_handle_mem_mmap(vma->vm_pgoff << PAGE_SHIFT, vma))
++ return -EPERM;
++#endif
++
+ vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
+ size,
+ vma->vm_page_prot);
+@@ -558,6 +573,11 @@ static ssize_t write_kmem(struct file *
+ ssize_t written;
+ char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_kmem_write();
++ return -EPERM;
++#endif
++
+ if (p < (unsigned long) high_memory) {
+
+ wrote = count;
+@@ -763,6 +783,16 @@ static loff_t memory_lseek(struct file *
+
+ static int open_port(struct inode * inode, struct file * filp)
+ {
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_open_port();
++ return -EPERM;
++#endif
++
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++static int open_mem(struct inode * inode, struct file * filp)
++{
+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+ }
+
+@@ -770,7 +800,6 @@ static int open_port(struct inode * inod
+ #define full_lseek null_lseek
+ #define write_zero write_null
+ #define read_full read_zero
+-#define open_mem open_port
+ #define open_kmem open_mem
+ #define open_oldmem open_mem
+
+@@ -888,6 +917,9 @@ static const struct {
+ #ifdef CONFIG_CRASH_DUMP
+ {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops, NULL},
+ #endif
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++ {13,"grsec", S_IRUSR | S_IWUGO, &grsec_fops},
++#endif
+ };
+
+ static int memory_open(struct inode *inode, struct file *filp)
+diff -urNp linux-2.6.31.7/drivers/char/misc.c linux-2.6.31.7/drivers/char/misc.c
+--- linux-2.6.31.7/drivers/char/misc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/misc.c 2009-12-08 17:39:43.354705873 -0500
+@@ -91,7 +91,7 @@ static int misc_seq_show(struct seq_file
+ }
+
+
+-static struct seq_operations misc_seq_ops = {
++static const struct seq_operations misc_seq_ops = {
+ .start = misc_seq_start,
+ .next = misc_seq_next,
+ .stop = misc_seq_stop,
+diff -urNp linux-2.6.31.7/drivers/char/mspec.c linux-2.6.31.7/drivers/char/mspec.c
+--- linux-2.6.31.7/drivers/char/mspec.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/mspec.c 2009-12-08 17:39:43.354705873 -0500
+@@ -239,7 +239,7 @@ mspec_fault(struct vm_area_struct *vma,
+ return VM_FAULT_NOPAGE;
+ }
+
+-static struct vm_operations_struct mspec_vm_ops = {
++static const struct vm_operations_struct mspec_vm_ops = {
+ .open = mspec_open,
+ .close = mspec_close,
+ .fault = mspec_fault,
+diff -urNp linux-2.6.31.7/drivers/char/nvram.c linux-2.6.31.7/drivers/char/nvram.c
+--- linux-2.6.31.7/drivers/char/nvram.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/nvram.c 2009-12-08 17:39:43.355710089 -0500
+@@ -429,7 +429,10 @@ static const struct file_operations nvra
+ static struct miscdevice nvram_dev = {
+ NVRAM_MINOR,
+ "nvram",
+- &nvram_fops
++ &nvram_fops,
++ {NULL, NULL},
++ NULL,
++ NULL
+ };
+
+ static int __init nvram_init(void)
+diff -urNp linux-2.6.31.7/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.31.7/drivers/char/pcmcia/ipwireless/tty.c
+--- linux-2.6.31.7/drivers/char/pcmcia/ipwireless/tty.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/pcmcia/ipwireless/tty.c 2009-12-08 17:39:43.356663297 -0500
+@@ -51,7 +51,7 @@ struct ipw_tty {
+ int tty_type;
+ struct ipw_network *network;
+ struct tty_struct *linux_tty;
+- int open_count;
++ atomic_t open_count;
+ unsigned int control_lines;
+ struct mutex ipw_tty_mutex;
+ int tx_bytes_queued;
+@@ -127,10 +127,10 @@ static int ipw_open(struct tty_struct *l
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return -ENODEV;
+ }
+- if (tty->open_count == 0)
++ if (atomic_read(&tty->open_count) == 0)
+ tty->tx_bytes_queued = 0;
+
+- tty->open_count++;
++ atomic_inc(&tty->open_count);
+
+ tty->linux_tty = linux_tty;
+ linux_tty->driver_data = tty;
+@@ -146,9 +146,7 @@ static int ipw_open(struct tty_struct *l
+
+ static void do_ipw_close(struct ipw_tty *tty)
+ {
+- tty->open_count--;
+-
+- if (tty->open_count == 0) {
++ if (atomic_dec_return(&tty->open_count) == 0) {
+ struct tty_struct *linux_tty = tty->linux_tty;
+
+ if (linux_tty != NULL) {
+@@ -169,7 +167,7 @@ static void ipw_hangup(struct tty_struct
+ return;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+- if (tty->open_count == 0) {
++ if (atomic_read(&tty->open_count) == 0) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+@@ -198,7 +196,7 @@ void ipwireless_tty_received(struct ipw_
+ return;
+ }
+
+- if (!tty->open_count) {
++ if (!atomic_read(&tty->open_count)) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+@@ -240,7 +238,7 @@ static int ipw_write(struct tty_struct *
+ return -ENODEV;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+- if (!tty->open_count) {
++ if (!atomic_read(&tty->open_count)) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return -EINVAL;
+ }
+@@ -280,7 +278,7 @@ static int ipw_write_room(struct tty_str
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!atomic_read(&tty->open_count))
+ return -EINVAL;
+
+ room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
+@@ -322,7 +320,7 @@ static int ipw_chars_in_buffer(struct tt
+ if (!tty)
+ return 0;
+
+- if (!tty->open_count)
++ if (!atomic_read(&tty->open_count))
+ return 0;
+
+ return tty->tx_bytes_queued;
+@@ -403,7 +401,7 @@ static int ipw_tiocmget(struct tty_struc
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!atomic_read(&tty->open_count))
+ return -EINVAL;
+
+ return get_control_lines(tty);
+@@ -419,7 +417,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!atomic_read(&tty->open_count))
+ return -EINVAL;
+
+ return set_control_lines(tty, set, clear);
+@@ -433,7 +431,7 @@ static int ipw_ioctl(struct tty_struct *
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!atomic_read(&tty->open_count))
+ return -EINVAL;
+
+ /* FIXME: Exactly how is the tty object locked here .. */
+@@ -591,7 +589,7 @@ void ipwireless_tty_free(struct ipw_tty
+ against a parallel ioctl etc */
+ mutex_lock(&ttyj->ipw_tty_mutex);
+ }
+- while (ttyj->open_count)
++ while (atomic_read(&ttyj->open_count))
+ do_ipw_close(ttyj);
+ ipwireless_disassociate_network_ttys(network,
+ ttyj->channel_idx);
+diff -urNp linux-2.6.31.7/drivers/char/random.c linux-2.6.31.7/drivers/char/random.c
+--- linux-2.6.31.7/drivers/char/random.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/random.c 2009-12-08 17:39:43.363193220 -0500
+@@ -253,8 +253,13 @@
+ /*
+ * Configuration information
+ */
++#ifdef CONFIG_GRKERNSEC_RANDNET
++#define INPUT_POOL_WORDS 512
++#define OUTPUT_POOL_WORDS 128
++#else
+ #define INPUT_POOL_WORDS 128
+ #define OUTPUT_POOL_WORDS 32
++#endif
+ #define SEC_XFER_SIZE 512
+
+ /*
+@@ -291,10 +296,17 @@ static struct poolinfo {
+ int poolwords;
+ int tap1, tap2, tap3, tap4, tap5;
+ } poolinfo_table[] = {
++#ifdef CONFIG_GRKERNSEC_RANDNET
++ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
++ { 512, 411, 308, 208, 104, 1 },
++ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
++ { 128, 103, 76, 51, 25, 1 },
++#else
+ /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
+ { 128, 103, 76, 51, 25, 1 },
+ /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
+ { 32, 26, 20, 14, 7, 1 },
++#endif
+ #if 0
+ /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
+ { 2048, 1638, 1231, 819, 411, 1 },
+@@ -1204,7 +1216,7 @@ EXPORT_SYMBOL(generate_random_uuid);
+ #include <linux/sysctl.h>
+
+ static int min_read_thresh = 8, min_write_thresh;
+-static int max_read_thresh = INPUT_POOL_WORDS * 32;
++static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
+ static int max_write_thresh = INPUT_POOL_WORDS * 32;
+ static char sysctl_bootid[16];
+
+diff -urNp linux-2.6.31.7/drivers/char/sonypi.c linux-2.6.31.7/drivers/char/sonypi.c
+--- linux-2.6.31.7/drivers/char/sonypi.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/sonypi.c 2009-12-08 17:39:43.363716011 -0500
+@@ -490,7 +490,7 @@ static struct sonypi_device {
+ spinlock_t fifo_lock;
+ wait_queue_head_t fifo_proc_list;
+ struct fasync_struct *fifo_async;
+- int open_count;
++ atomic_t open_count;
+ int model;
+ struct input_dev *input_jog_dev;
+ struct input_dev *input_key_dev;
+@@ -894,7 +894,7 @@ static int sonypi_misc_fasync(int fd, st
+ static int sonypi_misc_release(struct inode *inode, struct file *file)
+ {
+ mutex_lock(&sonypi_device.lock);
+- sonypi_device.open_count--;
++ atomic_dec(&sonypi_device.open_count);
+ mutex_unlock(&sonypi_device.lock);
+ return 0;
+ }
+@@ -904,9 +904,9 @@ static int sonypi_misc_open(struct inode
+ lock_kernel();
+ mutex_lock(&sonypi_device.lock);
+ /* Flush input queue on first open */
+- if (!sonypi_device.open_count)
++ if (!atomic_read(&sonypi_device.open_count))
+ kfifo_reset(sonypi_device.fifo);
+- sonypi_device.open_count++;
++ atomic_inc(&sonypi_device.open_count);
+ mutex_unlock(&sonypi_device.lock);
+ unlock_kernel();
+ return 0;
+diff -urNp linux-2.6.31.7/drivers/char/tpm/tpm_bios.c linux-2.6.31.7/drivers/char/tpm/tpm_bios.c
+--- linux-2.6.31.7/drivers/char/tpm/tpm_bios.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/tpm/tpm_bios.c 2009-12-08 17:39:43.364707566 -0500
+@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
+ event = addr;
+
+ if ((event->event_type == 0 && event->event_size == 0) ||
+- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
++ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
+ return NULL;
+
+ return addr;
+@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
+ return NULL;
+
+ if ((event->event_type == 0 && event->event_size == 0) ||
+- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
++ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
+ return NULL;
+
+ (*pos)++;
+@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
+ int i;
+
+ for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
+- seq_putc(m, data[i]);
++ if (!seq_putc(m, data[i]))
++ return -EFAULT;
+
+ return 0;
+ }
+@@ -343,14 +344,14 @@ static int tpm_ascii_bios_measurements_s
+ return 0;
+ }
+
+-static struct seq_operations tpm_ascii_b_measurments_seqops = {
++static const struct seq_operations tpm_ascii_b_measurments_seqops = {
+ .start = tpm_bios_measurements_start,
+ .next = tpm_bios_measurements_next,
+ .stop = tpm_bios_measurements_stop,
+ .show = tpm_ascii_bios_measurements_show,
+ };
+
+-static struct seq_operations tpm_binary_b_measurments_seqops = {
++static const struct seq_operations tpm_binary_b_measurments_seqops = {
+ .start = tpm_bios_measurements_start,
+ .next = tpm_bios_measurements_next,
+ .stop = tpm_bios_measurements_stop,
+@@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
+ log->bios_event_log_end = log->bios_event_log + len;
+
+ virt = acpi_os_map_memory(start, len);
++ if (!virt) {
++ kfree(log->bios_event_log);
++ log->bios_event_log = NULL;
++ return -EFAULT;
++ }
+
+ memcpy(log->bios_event_log, virt, len);
+
+diff -urNp linux-2.6.31.7/drivers/char/tty_ldisc.c linux-2.6.31.7/drivers/char/tty_ldisc.c
+--- linux-2.6.31.7/drivers/char/tty_ldisc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/tty_ldisc.c 2009-12-08 17:39:43.365712349 -0500
+@@ -73,7 +73,7 @@ static void put_ldisc(struct tty_ldisc *
+ if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
+ struct tty_ldisc_ops *ldo = ld->ops;
+
+- ldo->refcount--;
++ atomic_dec(&ldo->refcount);
+ module_put(ldo->owner);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+@@ -107,7 +107,7 @@ int tty_register_ldisc(int disc, struct
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ tty_ldiscs[disc] = new_ldisc;
+ new_ldisc->num = disc;
+- new_ldisc->refcount = 0;
++ atomic_set(&new_ldisc->refcount, 0);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ return ret;
+@@ -135,7 +135,7 @@ int tty_unregister_ldisc(int disc)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+- if (tty_ldiscs[disc]->refcount)
++ if (atomic_read(&tty_ldiscs[disc]->refcount))
+ ret = -EBUSY;
+ else
+ tty_ldiscs[disc] = NULL;
+@@ -175,7 +175,7 @@ static struct tty_ldisc *tty_ldisc_try_g
+ err = -EAGAIN;
+ else {
+ /* lock it */
+- ldops->refcount++;
++ atomic_inc(&ldops->refcount);
+ ld->ops = ldops;
+ atomic_set(&ld->users, 1);
+ err = 0;
+diff -urNp linux-2.6.31.7/drivers/char/vt_ioctl.c linux-2.6.31.7/drivers/char/vt_ioctl.c
+--- linux-2.6.31.7/drivers/char/vt_ioctl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/vt_ioctl.c 2009-12-08 17:39:43.366683015 -0500
+@@ -97,6 +97,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
+ case KDSKBENT:
+ if (!perm)
+ return -EPERM;
++
++#ifdef CONFIG_GRKERNSEC
++ if (!capable(CAP_SYS_TTY_CONFIG))
++ return -EPERM;
++#endif
++
+ if (!i && v == K_NOSUCHMAP) {
+ /* deallocate map */
+ key_map = key_maps[s];
+@@ -237,6 +243,13 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
+ goto reterr;
+ }
+
++#ifdef CONFIG_GRKERNSEC
++ if (!capable(CAP_SYS_TTY_CONFIG)) {
++ ret = -EPERM;
++ goto reterr;
++ }
++#endif
++
+ q = func_table[i];
+ first_free = funcbufptr + (funcbufsize - funcbufleft);
+ for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
+diff -urNp linux-2.6.31.7/drivers/char/xilinx_hwicap/xilinx_hwicap.c linux-2.6.31.7/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+--- linux-2.6.31.7/drivers/char/xilinx_hwicap/xilinx_hwicap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/char/xilinx_hwicap/xilinx_hwicap.c 2009-12-08 17:39:43.367714078 -0500
+@@ -559,7 +559,7 @@ static int hwicap_release(struct inode *
+ return status;
+ }
+
+-static struct file_operations hwicap_fops = {
++static const struct file_operations hwicap_fops = {
+ .owner = THIS_MODULE,
+ .write = hwicap_write,
+ .read = hwicap_read,
+diff -urNp linux-2.6.31.7/drivers/cpufreq/cpufreq.c linux-2.6.31.7/drivers/cpufreq/cpufreq.c
+--- linux-2.6.31.7/drivers/cpufreq/cpufreq.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/cpufreq/cpufreq.c 2009-12-08 17:39:43.367714078 -0500
+@@ -745,7 +745,7 @@ static void cpufreq_sysfs_release(struct
+ complete(&policy->kobj_unregister);
+ }
+
+-static struct sysfs_ops sysfs_ops = {
++static const struct sysfs_ops sysfs_ops = {
+ .show = show,
+ .store = store,
+ };
+diff -urNp linux-2.6.31.7/drivers/cpuidle/sysfs.c linux-2.6.31.7/drivers/cpuidle/sysfs.c
+--- linux-2.6.31.7/drivers/cpuidle/sysfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/cpuidle/sysfs.c 2009-12-08 17:39:43.373715473 -0500
+@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
+ return ret;
+ }
+
+-static struct sysfs_ops cpuidle_sysfs_ops = {
++static const struct sysfs_ops cpuidle_sysfs_ops = {
+ .show = cpuidle_show,
+ .store = cpuidle_store,
+ };
+@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
+ return ret;
+ }
+
+-static struct sysfs_ops cpuidle_state_sysfs_ops = {
++static const struct sysfs_ops cpuidle_state_sysfs_ops = {
+ .show = cpuidle_state_show,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/edac/edac_core.h linux-2.6.31.7/drivers/edac/edac_core.h
+--- linux-2.6.31.7/drivers/edac/edac_core.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/edac/edac_core.h 2009-12-08 17:39:43.373715473 -0500
+@@ -99,11 +99,11 @@ extern int edac_debug_level;
+
+ #else /* !CONFIG_EDAC_DEBUG */
+
+-#define debugf0( ... )
+-#define debugf1( ... )
+-#define debugf2( ... )
+-#define debugf3( ... )
+-#define debugf4( ... )
++#define debugf0( ... ) do {} while (0)
++#define debugf1( ... ) do {} while (0)
++#define debugf2( ... ) do {} while (0)
++#define debugf3( ... ) do {} while (0)
++#define debugf4( ... ) do {} while (0)
+
+ #endif /* !CONFIG_EDAC_DEBUG */
+
+diff -urNp linux-2.6.31.7/drivers/edac/edac_device_sysfs.c linux-2.6.31.7/drivers/edac/edac_device_sysfs.c
+--- linux-2.6.31.7/drivers/edac/edac_device_sysfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/edac/edac_device_sysfs.c 2009-12-08 17:39:43.374715116 -0500
+@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
+ }
+
+ /* edac_dev file operations for an 'ctl_info' */
+-static struct sysfs_ops device_ctl_info_ops = {
++static const struct sysfs_ops device_ctl_info_ops = {
+ .show = edac_dev_ctl_info_show,
+ .store = edac_dev_ctl_info_store
+ };
+@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
+ }
+
+ /* edac_dev file operations for an 'instance' */
+-static struct sysfs_ops device_instance_ops = {
++static const struct sysfs_ops device_instance_ops = {
+ .show = edac_dev_instance_show,
+ .store = edac_dev_instance_store
+ };
+@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
+ }
+
+ /* edac_dev file operations for a 'block' */
+-static struct sysfs_ops device_block_ops = {
++static const struct sysfs_ops device_block_ops = {
+ .show = edac_dev_block_show,
+ .store = edac_dev_block_store
+ };
+diff -urNp linux-2.6.31.7/drivers/edac/edac_mc_sysfs.c linux-2.6.31.7/drivers/edac/edac_mc_sysfs.c
+--- linux-2.6.31.7/drivers/edac/edac_mc_sysfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/edac/edac_mc_sysfs.c 2009-12-08 17:39:43.375718644 -0500
+@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
+ return -EIO;
+ }
+
+-static struct sysfs_ops csrowfs_ops = {
++static const struct sysfs_ops csrowfs_ops = {
+ .show = csrowdev_show,
+ .store = csrowdev_store
+ };
+@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
+ }
+
+ /* Intermediate show/store table */
+-static struct sysfs_ops mci_ops = {
++static const struct sysfs_ops mci_ops = {
+ .show = mcidev_show,
+ .store = mcidev_store
+ };
+diff -urNp linux-2.6.31.7/drivers/edac/edac_pci_sysfs.c linux-2.6.31.7/drivers/edac/edac_pci_sysfs.c
+--- linux-2.6.31.7/drivers/edac/edac_pci_sysfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/edac/edac_pci_sysfs.c 2009-12-08 17:39:43.375718644 -0500
+@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
+ }
+
+ /* fs_ops table */
+-static struct sysfs_ops pci_instance_ops = {
++static const struct sysfs_ops pci_instance_ops = {
+ .show = edac_pci_instance_show,
+ .store = edac_pci_instance_store
+ };
+@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
+ return -EIO;
+ }
+
+-static struct sysfs_ops edac_pci_sysfs_ops = {
++static const struct sysfs_ops edac_pci_sysfs_ops = {
+ .show = edac_pci_dev_show,
+ .store = edac_pci_dev_store
+ };
+diff -urNp linux-2.6.31.7/drivers/firmware/dmi_scan.c linux-2.6.31.7/drivers/firmware/dmi_scan.c
+--- linux-2.6.31.7/drivers/firmware/dmi_scan.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/firmware/dmi_scan.c 2009-12-08 17:39:43.376716928 -0500
+@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
+ }
+ }
+ else {
+- /*
+- * no iounmap() for that ioremap(); it would be a no-op, but
+- * it's so early in setup that sucker gets confused into doing
+- * what it shouldn't if we actually call it.
+- */
+ p = dmi_ioremap(0xF0000, 0x10000);
+ if (p == NULL)
+ goto error;
+diff -urNp linux-2.6.31.7/drivers/firmware/edd.c linux-2.6.31.7/drivers/firmware/edd.c
+--- linux-2.6.31.7/drivers/firmware/edd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/firmware/edd.c 2009-12-08 17:39:43.376716928 -0500
+@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
+ return ret;
+ }
+
+-static struct sysfs_ops edd_attr_ops = {
++static const struct sysfs_ops edd_attr_ops = {
+ .show = edd_attr_show,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/firmware/efivars.c linux-2.6.31.7/drivers/firmware/efivars.c
+--- linux-2.6.31.7/drivers/firmware/efivars.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/firmware/efivars.c 2009-12-08 17:39:43.390948328 -0500
+@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
+ return ret;
+ }
+
+-static struct sysfs_ops efivar_attr_ops = {
++static const struct sysfs_ops efivar_attr_ops = {
+ .show = efivar_attr_show,
+ .store = efivar_attr_store,
+ };
+diff -urNp linux-2.6.31.7/drivers/firmware/iscsi_ibft.c linux-2.6.31.7/drivers/firmware/iscsi_ibft.c
+--- linux-2.6.31.7/drivers/firmware/iscsi_ibft.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/firmware/iscsi_ibft.c 2009-12-08 17:39:43.391713258 -0500
+@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
+ return ret;
+ }
+
+-static struct sysfs_ops ibft_attr_ops = {
++static const struct sysfs_ops ibft_attr_ops = {
+ .show = ibft_show_attribute,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/firmware/memmap.c linux-2.6.31.7/drivers/firmware/memmap.c
+--- linux-2.6.31.7/drivers/firmware/memmap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/firmware/memmap.c 2009-12-08 17:39:43.391713258 -0500
+@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
+ NULL
+ };
+
+-static struct sysfs_ops memmap_attr_ops = {
++static const struct sysfs_ops memmap_attr_ops = {
+ .show = memmap_attr_show,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/gpio/gpiolib.c linux-2.6.31.7/drivers/gpio/gpiolib.c
+--- linux-2.6.31.7/drivers/gpio/gpiolib.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpio/gpiolib.c 2009-12-08 17:39:43.391713258 -0500
+@@ -1244,7 +1244,7 @@ static int gpiolib_open(struct inode *in
+ return single_open(file, gpiolib_show, NULL);
+ }
+
+-static struct file_operations gpiolib_operations = {
++static const struct file_operations gpiolib_operations = {
+ .open = gpiolib_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/drm_drv.c linux-2.6.31.7/drivers/gpu/drm/drm_drv.c
+--- linux-2.6.31.7/drivers/gpu/drm/drm_drv.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/drm_drv.c 2009-12-08 17:39:43.391713258 -0500
+@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
+ char *kdata = NULL;
+
+ atomic_inc(&dev->ioctl_count);
+- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
+ ++file_priv->ioctl_count;
+
+ DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/drm_fops.c linux-2.6.31.7/drivers/gpu/drm/drm_fops.c
+--- linux-2.6.31.7/drivers/gpu/drm/drm_fops.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/drm_fops.c 2009-12-08 17:39:43.408694628 -0500
+@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+- atomic_set(&dev->counts[i], 0);
++ atomic_set_unchecked(&dev->counts[i], 0);
+
+ dev->sigdata.lock = NULL;
+
+@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
+
+ retcode = drm_open_helper(inode, filp, dev);
+ if (!retcode) {
+- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
+ spin_lock(&dev->count_lock);
+- if (!dev->open_count++) {
++ if (atomic_inc_return(&dev->open_count) == 1) {
+ spin_unlock(&dev->count_lock);
+ retcode = drm_setup(dev);
+ goto out;
+@@ -433,7 +433,7 @@ int drm_release(struct inode *inode, str
+
+ lock_kernel();
+
+- DRM_DEBUG("open_count = %d\n", dev->open_count);
++ DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
+
+ if (dev->driver->preclose)
+ dev->driver->preclose(dev, file_priv);
+@@ -445,7 +445,7 @@ int drm_release(struct inode *inode, str
+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->device),
+- dev->open_count);
++ atomic_read(&dev->open_count));
+
+ /* if the master has gone away we can't do anything with the lock */
+ if (file_priv->minor->master)
+@@ -522,9 +522,9 @@ int drm_release(struct inode *inode, str
+ * End inline drm_release
+ */
+
+- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
+ spin_lock(&dev->count_lock);
+- if (!--dev->open_count) {
++ if (atomic_dec_and_test(&dev->open_count)) {
+ if (atomic_read(&dev->ioctl_count)) {
+ DRM_ERROR("Device busy: %d\n",
+ atomic_read(&dev->ioctl_count));
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/drm_ioctl.c linux-2.6.31.7/drivers/gpu/drm/drm_ioctl.c
+--- linux-2.6.31.7/drivers/gpu/drm/drm_ioctl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/drm_ioctl.c 2009-12-08 17:39:43.408694628 -0500
+@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
+ stats->data[i].value =
+ (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
+ else
+- stats->data[i].value = atomic_read(&dev->counts[i]);
++ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
+ stats->data[i].type = dev->types[i];
+ }
+
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/drm_lock.c linux-2.6.31.7/drivers/gpu/drm/drm_lock.c
+--- linux-2.6.31.7/drivers/gpu/drm/drm_lock.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/drm_lock.c 2009-12-08 17:39:43.409716393 -0500
+@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
+ if (drm_lock_take(&master->lock, lock->context)) {
+ master->lock.file_priv = file_priv;
+ master->lock.lock_time = jiffies;
+- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
+ break; /* Got lock */
+ }
+
+@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
+ return -EINVAL;
+ }
+
+- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+ /* kernel_context_switch isn't used by any of the x86 drm
+ * modules but is required by the Sparc driver.
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/drm_vm.c linux-2.6.31.7/drivers/gpu/drm/drm_vm.c
+--- linux-2.6.31.7/drivers/gpu/drm/drm_vm.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/drm_vm.c 2009-12-08 17:39:43.416772400 -0500
+@@ -369,28 +369,28 @@ static int drm_vm_sg_fault(struct vm_are
+ }
+
+ /** AGP virtual memory operations */
+-static struct vm_operations_struct drm_vm_ops = {
++static const struct vm_operations_struct drm_vm_ops = {
+ .fault = drm_vm_fault,
+ .open = drm_vm_open,
+ .close = drm_vm_close,
+ };
+
+ /** Shared virtual memory operations */
+-static struct vm_operations_struct drm_vm_shm_ops = {
++static const struct vm_operations_struct drm_vm_shm_ops = {
+ .fault = drm_vm_shm_fault,
+ .open = drm_vm_open,
+ .close = drm_vm_shm_close,
+ };
+
+ /** DMA virtual memory operations */
+-static struct vm_operations_struct drm_vm_dma_ops = {
++static const struct vm_operations_struct drm_vm_dma_ops = {
+ .fault = drm_vm_dma_fault,
+ .open = drm_vm_open,
+ .close = drm_vm_close,
+ };
+
+ /** Scatter-gather virtual memory operations */
+-static struct vm_operations_struct drm_vm_sg_ops = {
++static const struct vm_operations_struct drm_vm_sg_ops = {
+ .fault = drm_vm_sg_fault,
+ .open = drm_vm_open,
+ .close = drm_vm_close,
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/i810/i810_dma.c linux-2.6.31.7/drivers/gpu/drm/i810/i810_dma.c
+--- linux-2.6.31.7/drivers/gpu/drm/i810/i810_dma.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/i810/i810_dma.c 2009-12-08 17:39:43.422897548 -0500
+@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
+ dma->buflist[vertex->idx],
+ vertex->discard, vertex->used);
+
+- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
+- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+ sarea_priv->last_enqueue = dev_priv->counter - 1;
+ sarea_priv->last_dispatch = (int)hw_status[5];
+
+@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
+ i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
+ mc->last_render);
+
+- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
+- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+ sarea_priv->last_enqueue = dev_priv->counter - 1;
+ sarea_priv->last_dispatch = (int)hw_status[5];
+
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.31.7/drivers/gpu/drm/i915/dvo_ch7017.c
+--- linux-2.6.31.7/drivers/gpu/drm/i915/dvo_ch7017.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/i915/dvo_ch7017.c 2009-12-08 17:39:43.423717313 -0500
+@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
+ }
+ }
+
+-struct intel_dvo_dev_ops ch7017_ops = {
++const struct intel_dvo_dev_ops ch7017_ops = {
+ .init = ch7017_init,
+ .detect = ch7017_detect,
+ .mode_valid = ch7017_mode_valid,
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.31.7/drivers/gpu/drm/i915/dvo_ch7xxx.c
+--- linux-2.6.31.7/drivers/gpu/drm/i915/dvo_ch7xxx.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/i915/dvo_ch7xxx.c 2009-12-08 17:39:43.424717314 -0500
+@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
+ }
+ }
+
+-struct intel_dvo_dev_ops ch7xxx_ops = {
++const struct intel_dvo_dev_ops ch7xxx_ops = {
+ .init = ch7xxx_init,
+ .detect = ch7xxx_detect,
+ .mode_valid = ch7xxx_mode_valid,
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/i915/dvo.h linux-2.6.31.7/drivers/gpu/drm/i915/dvo.h
+--- linux-2.6.31.7/drivers/gpu/drm/i915/dvo.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/i915/dvo.h 2009-12-08 17:39:43.423717313 -0500
+@@ -51,14 +51,14 @@ struct intel_dvo_dev_ops {
+ * Initialize the device at startup time.
+ * Returns NULL if the device does not exist.
+ */
+- bool (*init)(struct intel_dvo_device *dvo,
++ bool (* const init)(struct intel_dvo_device *dvo,
+ struct i2c_adapter *i2cbus);
+
+ /*
+ * Called to allow the output a chance to create properties after the
+ * RandR objects have been created.
+ */
+- void (*create_resources)(struct intel_dvo_device *dvo);
++ void (* const create_resources)(struct intel_dvo_device *dvo);
+
+ /*
+ * Turn on/off output or set intermediate power levels if available.
+@@ -67,17 +67,17 @@ struct intel_dvo_dev_ops {
+ * If the mode is DPMSModeOff, the output must be disabled,
+ * as the DPLL may be disabled afterwards.
+ */
+- void (*dpms)(struct intel_dvo_device *dvo, int mode);
++ void (* const dpms)(struct intel_dvo_device *dvo, int mode);
+
+ /*
+ * Saves the output's state for restoration on VT switch.
+ */
+- void (*save)(struct intel_dvo_device *dvo);
++ void (* const save)(struct intel_dvo_device *dvo);
+
+ /*
+ * Restore's the output's state at VT switch.
+ */
+- void (*restore)(struct intel_dvo_device *dvo);
++ void (* const restore)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for testing a video mode for a given output.
+@@ -88,7 +88,7 @@ struct intel_dvo_dev_ops {
+ *
+ * \return MODE_OK if the mode is valid, or another MODE_* otherwise.
+ */
+- int (*mode_valid)(struct intel_dvo_device *dvo,
++ int (* const mode_valid)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode);
+
+ /*
+@@ -98,19 +98,19 @@ struct intel_dvo_dev_ops {
+ * timings, which is used for panels with fixed timings or for
+ * buses with clock limitations.
+ */
+- bool (*mode_fixup)(struct intel_dvo_device *dvo,
++ bool (* const mode_fixup)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /*
+ * Callback for preparing mode changes on an output
+ */
+- void (*prepare)(struct intel_dvo_device *dvo);
++ void (* const prepare)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for committing mode changes on an output
+ */
+- void (*commit)(struct intel_dvo_device *dvo);
++ void (* const commit)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for setting up a video mode after fixups have been made.
+@@ -119,14 +119,14 @@ struct intel_dvo_dev_ops {
+ * must be all that's necessary for the output, to turn the output on
+ * after this function is called.
+ */
+- void (*mode_set)(struct intel_dvo_device *dvo,
++ void (* const mode_set)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /*
+ * Probe for a connected output, and return detect_status.
+ */
+- enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
++ enum drm_connector_status (* const detect)(struct intel_dvo_device *dvo);
+
+ /**
+ * Query the device for the modes it provides.
+@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
+ *
+ * \return singly-linked list of modes or NULL if no modes found.
+ */
+- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
++ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
+
+ /**
+ * Clean up driver-specific bits of the output
+ */
+- void (*destroy) (struct intel_dvo_device *dvo);
++ void (* const destroy) (struct intel_dvo_device *dvo);
+
+ /**
+ * Debugging hook to dump device registers to log file
+ */
+- void (*dump_regs)(struct intel_dvo_device *dvo);
++ void (* const dump_regs)(struct intel_dvo_device *dvo);
+ };
+
+-extern struct intel_dvo_dev_ops sil164_ops;
+-extern struct intel_dvo_dev_ops ch7xxx_ops;
+-extern struct intel_dvo_dev_ops ivch_ops;
+-extern struct intel_dvo_dev_ops tfp410_ops;
+-extern struct intel_dvo_dev_ops ch7017_ops;
++extern const struct intel_dvo_dev_ops sil164_ops;
++extern const struct intel_dvo_dev_ops ch7xxx_ops;
++extern const struct intel_dvo_dev_ops ivch_ops;
++extern const struct intel_dvo_dev_ops tfp410_ops;
++extern const struct intel_dvo_dev_ops ch7017_ops;
+
+ #endif /* _INTEL_DVO_H */
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.31.7/drivers/gpu/drm/i915/dvo_ivch.c
+--- linux-2.6.31.7/drivers/gpu/drm/i915/dvo_ivch.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/i915/dvo_ivch.c 2009-12-08 17:39:43.424717314 -0500
+@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
+ }
+ }
+
+-struct intel_dvo_dev_ops ivch_ops= {
++const struct intel_dvo_dev_ops ivch_ops= {
+ .init = ivch_init,
+ .dpms = ivch_dpms,
+ .save = ivch_save,
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.31.7/drivers/gpu/drm/i915/dvo_sil164.c
+--- linux-2.6.31.7/drivers/gpu/drm/i915/dvo_sil164.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/i915/dvo_sil164.c 2009-12-08 17:39:43.425671034 -0500
+@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
+ }
+ }
+
+-struct intel_dvo_dev_ops sil164_ops = {
++const struct intel_dvo_dev_ops sil164_ops = {
+ .init = sil164_init,
+ .detect = sil164_detect,
+ .mode_valid = sil164_mode_valid,
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.31.7/drivers/gpu/drm/i915/dvo_tfp410.c
+--- linux-2.6.31.7/drivers/gpu/drm/i915/dvo_tfp410.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/i915/dvo_tfp410.c 2009-12-08 17:39:43.425671034 -0500
+@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
+ }
+ }
+
+-struct intel_dvo_dev_ops tfp410_ops = {
++const struct intel_dvo_dev_ops tfp410_ops = {
+ .init = tfp410_init,
+ .detect = tfp410_detect,
+ .mode_valid = tfp410_mode_valid,
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/i915/i915_drv.c linux-2.6.31.7/drivers/gpu/drm/i915/i915_drv.c
+--- linux-2.6.31.7/drivers/gpu/drm/i915/i915_drv.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/i915/i915_drv.c 2009-12-08 17:39:43.426725674 -0500
+@@ -152,7 +152,7 @@ i915_pci_resume(struct pci_dev *pdev)
+ return i915_resume(dev);
+ }
+
+-static struct vm_operations_struct i915_gem_vm_ops = {
++static const struct vm_operations_struct i915_gem_vm_ops = {
+ .fault = i915_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.31.7/drivers/gpu/drm/radeon/radeon_atombios.c
+--- linux-2.6.31.7/drivers/gpu/drm/radeon/radeon_atombios.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/radeon/radeon_atombios.c 2009-12-08 17:39:43.436862196 -0500
+@@ -425,13 +425,13 @@ bool radeon_get_atom_connector_info_from
+ return true;
+ }
+
+-struct bios_connector {
++static struct bios_connector {
+ bool valid;
+ uint8_t line_mux;
+ uint16_t devices;
+ int connector_type;
+ struct radeon_i2c_bus_rec ddc_bus;
+-};
++} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+
+ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+ drm_device
+@@ -447,7 +447,6 @@ bool radeon_get_atom_connector_info_from
+ uint8_t dac;
+ union atom_supported_devices *supported_devices;
+ int i, j;
+- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+
+ atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
+
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.31.7/drivers/gpu/drm/radeon/radeon_state.c
+--- linux-2.6.31.7/drivers/gpu/drm/radeon/radeon_state.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/radeon/radeon_state.c 2009-12-08 17:39:43.445731933 -0500
+@@ -3007,7 +3007,7 @@ static int radeon_cp_getparam(struct drm
+ {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_getparam_t *param = data;
+- int value;
++ int value = 0;
+
+ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.31.7/drivers/gpu/drm/radeon/radeon_ttm.c
+--- linux-2.6.31.7/drivers/gpu/drm/radeon/radeon_ttm.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/radeon/radeon_ttm.c 2009-12-08 17:39:43.446719280 -0500
+@@ -500,27 +500,10 @@ void radeon_ttm_fini(struct radeon_devic
+ DRM_INFO("radeon: ttm finalized\n");
+ }
+
+-static struct vm_operations_struct radeon_ttm_vm_ops;
+-static struct vm_operations_struct *ttm_vm_ops = NULL;
+-
+-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+-{
+- struct ttm_buffer_object *bo;
+- int r;
+-
+- bo = (struct ttm_buffer_object *)vma->vm_private_data;
+- if (bo == NULL) {
+- return VM_FAULT_NOPAGE;
+- }
+- r = ttm_vm_ops->fault(vma, vmf);
+- return r;
+-}
+-
+ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ struct drm_file *file_priv;
+ struct radeon_device *rdev;
+- int r;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+ return drm_mmap(filp, vma);
+@@ -528,20 +511,9 @@ int radeon_mmap(struct file *filp, struc
+
+ file_priv = (struct drm_file *)filp->private_data;
+ rdev = file_priv->minor->dev->dev_private;
+- if (rdev == NULL) {
++ if (!rdev)
+ return -EINVAL;
+- }
+- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
+- if (unlikely(r != 0)) {
+- return r;
+- }
+- if (unlikely(ttm_vm_ops == NULL)) {
+- ttm_vm_ops = vma->vm_ops;
+- radeon_ttm_vm_ops = *ttm_vm_ops;
+- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
+- }
+- vma->vm_ops = &radeon_ttm_vm_ops;
+- return 0;
++ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
+ }
+
+
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.31.7/drivers/gpu/drm/ttm/ttm_bo_vm.c
+--- linux-2.6.31.7/drivers/gpu/drm/ttm/ttm_bo_vm.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/ttm/ttm_bo_vm.c 2009-12-08 17:39:43.446719280 -0500
+@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
+ {
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+- struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_device *bdev;
+ unsigned long bus_base;
+ unsigned long bus_offset;
+ unsigned long bus_size;
+@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
+ unsigned long address = (unsigned long)vmf->virtual_address;
+ int retval = VM_FAULT_NOPAGE;
+
++ if (!bo)
++ return VM_FAULT_NOPAGE;
++ bdev = bo->bdev;
++
+ /*
+ * Work around locking order reversal in fault / nopfn
+ * between mmap_sem and bo_reserve: Perform a trylock operation
+@@ -228,7 +232,7 @@ static void ttm_bo_vm_close(struct vm_ar
+ vma->vm_private_data = NULL;
+ }
+
+-static struct vm_operations_struct ttm_bo_vm_ops = {
++static const struct vm_operations_struct ttm_bo_vm_ops = {
+ .fault = ttm_bo_vm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close
+diff -urNp linux-2.6.31.7/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.31.7/drivers/gpu/drm/ttm/ttm_global.c
+--- linux-2.6.31.7/drivers/gpu/drm/ttm/ttm_global.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/gpu/drm/ttm/ttm_global.c 2009-12-08 17:39:43.448716153 -0500
+@@ -36,7 +36,7 @@
+ struct ttm_global_item {
+ struct mutex mutex;
+ void *object;
+- int refcount;
++ atomic_t refcount;
+ };
+
+ static struct ttm_global_item glob[TTM_GLOBAL_NUM];
+@@ -49,7 +49,7 @@ void ttm_global_init(void)
+ struct ttm_global_item *item = &glob[i];
+ mutex_init(&item->mutex);
+ item->object = NULL;
+- item->refcount = 0;
++ atomic_set(&item->refcount, 0);
+ }
+ }
+
+@@ -59,7 +59,7 @@ void ttm_global_release(void)
+ for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
+ struct ttm_global_item *item = &glob[i];
+ BUG_ON(item->object != NULL);
+- BUG_ON(item->refcount != 0);
++ BUG_ON(atomic_read(&item->refcount) != 0);
+ }
+ }
+
+@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
+ void *object;
+
+ mutex_lock(&item->mutex);
+- if (item->refcount == 0) {
++ if (atomic_read(&item->refcount) == 0) {
+ item->object = kmalloc(ref->size, GFP_KERNEL);
+ if (unlikely(item->object == NULL)) {
+ ret = -ENOMEM;
+@@ -82,7 +82,7 @@ int ttm_global_item_ref(struct ttm_globa
+ if (unlikely(ret != 0))
+ goto out_err;
+
+- ++item->refcount;
++ atomic_inc(&item->refcount);
+ }
+ ref->object = item->object;
+ object = item->object;
+@@ -101,9 +101,9 @@ void ttm_global_item_unref(struct ttm_gl
+ struct ttm_global_item *item = &glob[ref->global_type];
+
+ mutex_lock(&item->mutex);
+- BUG_ON(item->refcount == 0);
++ BUG_ON(atomic_read(&item->refcount) == 0);
+ BUG_ON(ref->object != item->object);
+- if (--item->refcount == 0) {
++ if (atomic_dec_and_test(&item->refcount)) {
+ ref->release(ref);
+ kfree(item->object);
+ item->object = NULL;
+diff -urNp linux-2.6.31.7/drivers/hwmon/fschmd.c linux-2.6.31.7/drivers/hwmon/fschmd.c
+--- linux-2.6.31.7/drivers/hwmon/fschmd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/hwmon/fschmd.c 2009-12-08 17:39:43.449686383 -0500
+@@ -915,7 +915,7 @@ static int watchdog_ioctl(struct inode *
+ return ret;
+ }
+
+-static struct file_operations watchdog_fops = {
++static const struct file_operations watchdog_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = watchdog_open,
+diff -urNp linux-2.6.31.7/drivers/hwmon/fscpos.c linux-2.6.31.7/drivers/hwmon/fscpos.c
+--- linux-2.6.31.7/drivers/hwmon/fscpos.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/hwmon/fscpos.c 2009-12-08 17:39:43.449686383 -0500
+@@ -240,7 +240,6 @@ static ssize_t set_pwm(struct i2c_client
+ unsigned long v = simple_strtoul(buf, NULL, 10);
+
+ /* Range: 0..255 */
+- if (v < 0) v = 0;
+ if (v > 255) v = 255;
+
+ mutex_lock(&data->update_lock);
+diff -urNp linux-2.6.31.7/drivers/hwmon/k8temp.c linux-2.6.31.7/drivers/hwmon/k8temp.c
+--- linux-2.6.31.7/drivers/hwmon/k8temp.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/hwmon/k8temp.c 2009-12-08 17:39:43.450718403 -0500
+@@ -138,7 +138,7 @@ static DEVICE_ATTR(name, S_IRUGO, show_n
+
+ static struct pci_device_id k8temp_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
+- { 0 },
++ { 0, 0, 0, 0, 0, 0, 0 },
+ };
+
+ MODULE_DEVICE_TABLE(pci, k8temp_ids);
+diff -urNp linux-2.6.31.7/drivers/hwmon/sis5595.c linux-2.6.31.7/drivers/hwmon/sis5595.c
+--- linux-2.6.31.7/drivers/hwmon/sis5595.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/hwmon/sis5595.c 2009-12-08 17:39:43.450718403 -0500
+@@ -699,7 +699,7 @@ static struct sis5595_data *sis5595_upda
+
+ static struct pci_device_id sis5595_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(pci, sis5595_pci_ids);
+diff -urNp linux-2.6.31.7/drivers/hwmon/via686a.c linux-2.6.31.7/drivers/hwmon/via686a.c
+--- linux-2.6.31.7/drivers/hwmon/via686a.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/hwmon/via686a.c 2009-12-08 17:39:43.451694646 -0500
+@@ -769,7 +769,7 @@ static struct via686a_data *via686a_upda
+
+ static struct pci_device_id via686a_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(pci, via686a_pci_ids);
+diff -urNp linux-2.6.31.7/drivers/hwmon/vt8231.c linux-2.6.31.7/drivers/hwmon/vt8231.c
+--- linux-2.6.31.7/drivers/hwmon/vt8231.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/hwmon/vt8231.c 2009-12-08 17:39:43.452657331 -0500
+@@ -699,7 +699,7 @@ static struct platform_driver vt8231_dri
+
+ static struct pci_device_id vt8231_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(pci, vt8231_pci_ids);
+diff -urNp linux-2.6.31.7/drivers/hwmon/w83791d.c linux-2.6.31.7/drivers/hwmon/w83791d.c
+--- linux-2.6.31.7/drivers/hwmon/w83791d.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/hwmon/w83791d.c 2009-12-08 17:39:43.452657331 -0500
+@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
+ struct i2c_board_info *info);
+ static int w83791d_remove(struct i2c_client *client);
+
+-static int w83791d_read(struct i2c_client *client, u8 register);
+-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
++static int w83791d_read(struct i2c_client *client, u8 reg);
++static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
+ static struct w83791d_data *w83791d_update_device(struct device *dev);
+
+ #ifdef DEBUG
+diff -urNp linux-2.6.31.7/drivers/i2c/busses/i2c-i801.c linux-2.6.31.7/drivers/i2c/busses/i2c-i801.c
+--- linux-2.6.31.7/drivers/i2c/busses/i2c-i801.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/i2c/busses/i2c-i801.c 2009-12-08 17:39:43.453704039 -0500
+@@ -578,7 +578,7 @@ static struct pci_device_id i801_ids[] =
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE (pci, i801_ids);
+diff -urNp linux-2.6.31.7/drivers/i2c/busses/i2c-piix4.c linux-2.6.31.7/drivers/i2c/busses/i2c-piix4.c
+--- linux-2.6.31.7/drivers/i2c/busses/i2c-piix4.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/i2c/busses/i2c-piix4.c 2009-12-08 17:39:43.461721067 -0500
+@@ -123,7 +123,7 @@ static struct dmi_system_id __devinitdat
+ .ident = "IBM",
+ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
+ },
+- { },
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+
+ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
+@@ -489,7 +489,7 @@ static struct pci_device_id piix4_ids[]
+ PCI_DEVICE_ID_SERVERWORKS_HT1000SB) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_HT1100LD) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE (pci, piix4_ids);
+diff -urNp linux-2.6.31.7/drivers/i2c/busses/i2c-sis630.c linux-2.6.31.7/drivers/i2c/busses/i2c-sis630.c
+--- linux-2.6.31.7/drivers/i2c/busses/i2c-sis630.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/i2c/busses/i2c-sis630.c 2009-12-08 17:39:43.462728148 -0500
+@@ -471,7 +471,7 @@ static struct i2c_adapter sis630_adapter
+ static struct pci_device_id sis630_ids[] __devinitdata = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE (pci, sis630_ids);
+diff -urNp linux-2.6.31.7/drivers/i2c/busses/i2c-sis96x.c linux-2.6.31.7/drivers/i2c/busses/i2c-sis96x.c
+--- linux-2.6.31.7/drivers/i2c/busses/i2c-sis96x.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/i2c/busses/i2c-sis96x.c 2009-12-08 17:39:43.467717608 -0500
+@@ -247,7 +247,7 @@ static struct i2c_adapter sis96x_adapter
+
+ static struct pci_device_id sis96x_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE (pci, sis96x_ids);
+diff -urNp linux-2.6.31.7/drivers/ieee1394/dma.c linux-2.6.31.7/drivers/ieee1394/dma.c
+--- linux-2.6.31.7/drivers/ieee1394/dma.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ieee1394/dma.c 2009-12-08 17:39:43.467717608 -0500
+@@ -247,7 +247,7 @@ static int dma_region_pagefault(struct v
+ return 0;
+ }
+
+-static struct vm_operations_struct dma_region_vm_ops = {
++static const struct vm_operations_struct dma_region_vm_ops = {
+ .fault = dma_region_pagefault,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/ieee1394/dv1394.c linux-2.6.31.7/drivers/ieee1394/dv1394.c
+--- linux-2.6.31.7/drivers/ieee1394/dv1394.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ieee1394/dv1394.c 2009-12-08 17:39:43.495995721 -0500
+@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
+ based upon DIF section and sequence
+ */
+
+-static void inline
++static inline void
+ frame_put_packet (struct frame *f, struct packet *p)
+ {
+ int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
+@@ -2178,7 +2178,7 @@ static const struct ieee1394_device_id d
+ .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
+ .version = AVC_SW_VERSION_ENTRY & 0xffffff
+ },
+- { }
++ { 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
+diff -urNp linux-2.6.31.7/drivers/ieee1394/eth1394.c linux-2.6.31.7/drivers/ieee1394/eth1394.c
+--- linux-2.6.31.7/drivers/ieee1394/eth1394.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ieee1394/eth1394.c 2009-12-08 17:39:43.499724787 -0500
+@@ -445,7 +445,7 @@ static const struct ieee1394_device_id e
+ .specifier_id = ETHER1394_GASP_SPECIFIER_ID,
+ .version = ETHER1394_GASP_VERSION,
+ },
+- {}
++ { 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table);
+diff -urNp linux-2.6.31.7/drivers/ieee1394/hosts.c linux-2.6.31.7/drivers/ieee1394/hosts.c
+--- linux-2.6.31.7/drivers/ieee1394/hosts.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ieee1394/hosts.c 2009-12-08 17:39:43.500729195 -0500
+@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
+ }
+
+ static struct hpsb_host_driver dummy_driver = {
++ .name = "dummy",
+ .transmit_packet = dummy_transmit_packet,
+ .devctl = dummy_devctl,
+ .isoctl = dummy_isoctl
+diff -urNp linux-2.6.31.7/drivers/ieee1394/ohci1394.c linux-2.6.31.7/drivers/ieee1394/ohci1394.c
+--- linux-2.6.31.7/drivers/ieee1394/ohci1394.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ieee1394/ohci1394.c 2009-12-08 17:39:43.515735201 -0500
+@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
+ printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
+
+ /* Module Parameters */
+-static int phys_dma = 1;
++static int phys_dma;
+ module_param(phys_dma, int, 0444);
+-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
++MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
+
+ static void dma_trm_tasklet(unsigned long data);
+ static void dma_trm_reset(struct dma_trm_ctx *d);
+@@ -3449,7 +3449,7 @@ static struct pci_device_id ohci1394_pci
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+- { 0, },
++ { 0, 0, 0, 0, 0, 0, 0 },
+ };
+
+ MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
+diff -urNp linux-2.6.31.7/drivers/ieee1394/raw1394.c linux-2.6.31.7/drivers/ieee1394/raw1394.c
+--- linux-2.6.31.7/drivers/ieee1394/raw1394.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ieee1394/raw1394.c 2009-12-08 17:39:43.549731244 -0500
+@@ -2999,7 +2999,7 @@ static const struct ieee1394_device_id r
+ .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
+ .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
+ .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff},
+- {}
++ { 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
+diff -urNp linux-2.6.31.7/drivers/ieee1394/sbp2.c linux-2.6.31.7/drivers/ieee1394/sbp2.c
+--- linux-2.6.31.7/drivers/ieee1394/sbp2.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ieee1394/sbp2.c 2009-12-08 17:39:43.557010744 -0500
+@@ -290,7 +290,7 @@ static const struct ieee1394_device_id s
+ .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
+ .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
+ .version = SBP2_SW_VERSION_ENTRY & 0xffffff},
+- {}
++ { 0, 0, 0, 0, 0, 0 }
+ };
+ MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
+
+@@ -2112,7 +2112,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
+ MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
+ MODULE_LICENSE("GPL");
+
+-static int sbp2_module_init(void)
++static int __init sbp2_module_init(void)
+ {
+ int ret;
+
+diff -urNp linux-2.6.31.7/drivers/ieee1394/video1394.c linux-2.6.31.7/drivers/ieee1394/video1394.c
+--- linux-2.6.31.7/drivers/ieee1394/video1394.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/ieee1394/video1394.c 2009-12-08 17:39:43.558729763 -0500
+@@ -1310,7 +1310,7 @@ static const struct ieee1394_device_id v
+ .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
+ .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff
+ },
+- { }
++ { 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
+diff -urNp linux-2.6.31.7/drivers/infiniband/core/cm.c linux-2.6.31.7/drivers/infiniband/core/cm.c
+--- linux-2.6.31.7/drivers/infiniband/core/cm.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/infiniband/core/cm.c 2009-12-08 17:39:43.572733262 -0500
+@@ -3597,7 +3597,7 @@ static ssize_t cm_show_counter(struct ko
+ atomic_long_read(&group->counter[cm_attr->index]));
+ }
+
+-static struct sysfs_ops cm_counter_ops = {
++static const struct sysfs_ops cm_counter_ops = {
+ .show = cm_show_counter
+ };
+
+diff -urNp linux-2.6.31.7/drivers/infiniband/core/sysfs.c linux-2.6.31.7/drivers/infiniband/core/sysfs.c
+--- linux-2.6.31.7/drivers/infiniband/core/sysfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/infiniband/core/sysfs.c 2009-12-08 17:39:43.582999057 -0500
+@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
+ return port_attr->show(p, port_attr, buf);
+ }
+
+-static struct sysfs_ops port_sysfs_ops = {
++static const struct sysfs_ops port_sysfs_ops = {
+ .show = port_attr_show
+ };
+
+diff -urNp linux-2.6.31.7/drivers/infiniband/hw/ehca/ehca_uverbs.c linux-2.6.31.7/drivers/infiniband/hw/ehca/ehca_uverbs.c
+--- linux-2.6.31.7/drivers/infiniband/hw/ehca/ehca_uverbs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/infiniband/hw/ehca/ehca_uverbs.c 2009-12-08 17:39:43.594582945 -0500
+@@ -95,7 +95,7 @@ static void ehca_mm_close(struct vm_area
+ vma->vm_start, vma->vm_end, *count);
+ }
+
+-static struct vm_operations_struct vm_ops = {
++static const struct vm_operations_struct vm_ops = {
+ .open = ehca_mm_open,
+ .close = ehca_mm_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/infiniband/hw/ipath/ipath_file_ops.c linux-2.6.31.7/drivers/infiniband/hw/ipath/ipath_file_ops.c
+--- linux-2.6.31.7/drivers/infiniband/hw/ipath/ipath_file_ops.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/infiniband/hw/ipath/ipath_file_ops.c 2009-12-08 17:39:43.614749081 -0500
+@@ -1151,7 +1151,7 @@ static int ipath_file_vma_fault(struct v
+ return 0;
+ }
+
+-static struct vm_operations_struct ipath_file_vm_ops = {
++static const struct vm_operations_struct ipath_file_vm_ops = {
+ .fault = ipath_file_vma_fault,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/infiniband/hw/ipath/ipath_mmap.c linux-2.6.31.7/drivers/infiniband/hw/ipath/ipath_mmap.c
+--- linux-2.6.31.7/drivers/infiniband/hw/ipath/ipath_mmap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/infiniband/hw/ipath/ipath_mmap.c 2009-12-08 17:39:43.614749081 -0500
+@@ -74,7 +74,7 @@ static void ipath_vma_close(struct vm_ar
+ kref_put(&ip->ref, ipath_release_mmap_info);
+ }
+
+-static struct vm_operations_struct ipath_vm_ops = {
++static const struct vm_operations_struct ipath_vm_ops = {
+ .open = ipath_vma_open,
+ .close = ipath_vma_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/input/keyboard/atkbd.c linux-2.6.31.7/drivers/input/keyboard/atkbd.c
+--- linux-2.6.31.7/drivers/input/keyboard/atkbd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/input/keyboard/atkbd.c 2009-12-08 17:39:43.614749081 -0500
+@@ -1188,7 +1188,7 @@ static struct serio_device_id atkbd_seri
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+- { 0 }
++ { 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(serio, atkbd_serio_ids);
+diff -urNp linux-2.6.31.7/drivers/input/mouse/lifebook.c linux-2.6.31.7/drivers/input/mouse/lifebook.c
+--- linux-2.6.31.7/drivers/input/mouse/lifebook.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/input/mouse/lifebook.c 2009-12-08 17:39:43.615745298 -0500
+@@ -116,7 +116,7 @@ static const struct dmi_system_id lifebo
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B142"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}
+ };
+
+ static psmouse_ret_t lifebook_process_byte(struct psmouse *psmouse)
+diff -urNp linux-2.6.31.7/drivers/input/mouse/psmouse-base.c linux-2.6.31.7/drivers/input/mouse/psmouse-base.c
+--- linux-2.6.31.7/drivers/input/mouse/psmouse-base.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/input/mouse/psmouse-base.c 2009-12-08 17:39:43.618732204 -0500
+@@ -1380,7 +1380,7 @@ static struct serio_device_id psmouse_se
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+- { 0 }
++ { 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(serio, psmouse_serio_ids);
+diff -urNp linux-2.6.31.7/drivers/input/mouse/synaptics.c linux-2.6.31.7/drivers/input/mouse/synaptics.c
+--- linux-2.6.31.7/drivers/input/mouse/synaptics.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/input/mouse/synaptics.c 2009-12-08 17:39:43.631698727 -0500
+@@ -437,7 +437,7 @@ static void synaptics_process_packet(str
+ break;
+ case 2:
+ if (SYN_MODEL_PEN(priv->model_id))
+- ; /* Nothing, treat a pen as a single finger */
++ break; /* Nothing, treat a pen as a single finger */
+ break;
+ case 4 ... 15:
+ if (SYN_CAP_PALMDETECT(priv->capabilities))
+@@ -663,7 +663,7 @@ static const struct dmi_system_id toshib
+ },
+
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+ #endif
+
+diff -urNp linux-2.6.31.7/drivers/input/mousedev.c linux-2.6.31.7/drivers/input/mousedev.c
+--- linux-2.6.31.7/drivers/input/mousedev.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/input/mousedev.c 2009-12-08 17:39:43.639753293 -0500
+@@ -1056,7 +1056,7 @@ static struct input_handler mousedev_han
+
+ #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
+ static struct miscdevice psaux_mouse = {
+- PSMOUSE_MINOR, "psaux", &mousedev_fops
++ PSMOUSE_MINOR, "psaux", &mousedev_fops, {NULL, NULL}, NULL, NULL
+ };
+ static int psaux_registered;
+ #endif
+diff -urNp linux-2.6.31.7/drivers/input/serio/i8042-x86ia64io.h linux-2.6.31.7/drivers/input/serio/i8042-x86ia64io.h
+--- linux-2.6.31.7/drivers/input/serio/i8042-x86ia64io.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/input/serio/i8042-x86ia64io.h 2009-12-08 17:39:43.641743931 -0500
+@@ -167,7 +167,7 @@ static struct dmi_system_id __initdata i
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+
+ /*
+@@ -390,7 +390,7 @@ static struct dmi_system_id __initdata i
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+
+ static struct dmi_system_id __initdata i8042_dmi_reset_table[] = {
+@@ -436,7 +436,7 @@ static struct dmi_system_id __initdata i
+ DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+
+ #ifdef CONFIG_PNP
+@@ -455,7 +455,7 @@ static struct dmi_system_id __initdata i
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+ #endif
+
+@@ -522,7 +522,7 @@ static struct dmi_system_id __initdata i
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+
+ #endif /* CONFIG_X86 */
+diff -urNp linux-2.6.31.7/drivers/input/serio/serio_raw.c linux-2.6.31.7/drivers/input/serio/serio_raw.c
+--- linux-2.6.31.7/drivers/input/serio/serio_raw.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/input/serio/serio_raw.c 2009-12-08 17:39:43.642743923 -0500
+@@ -376,7 +376,7 @@ static struct serio_device_id serio_raw_
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+- { 0 }
++ { 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(serio, serio_raw_serio_ids);
+diff -urNp linux-2.6.31.7/drivers/isdn/capi/kcapi_proc.c linux-2.6.31.7/drivers/isdn/capi/kcapi_proc.c
+--- linux-2.6.31.7/drivers/isdn/capi/kcapi_proc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/isdn/capi/kcapi_proc.c 2009-12-08 17:39:43.642743923 -0500
+@@ -89,14 +89,14 @@ static int contrstats_show(struct seq_fi
+ return 0;
+ }
+
+-static struct seq_operations seq_controller_ops = {
++static const struct seq_operations seq_controller_ops = {
+ .start = controller_start,
+ .next = controller_next,
+ .stop = controller_stop,
+ .show = controller_show,
+ };
+
+-static struct seq_operations seq_contrstats_ops = {
++static const struct seq_operations seq_contrstats_ops = {
+ .start = controller_start,
+ .next = controller_next,
+ .stop = controller_stop,
+@@ -194,14 +194,14 @@ applstats_show(struct seq_file *seq, voi
+ return 0;
+ }
+
+-static struct seq_operations seq_applications_ops = {
++static const struct seq_operations seq_applications_ops = {
+ .start = applications_start,
+ .next = applications_next,
+ .stop = applications_stop,
+ .show = applications_show,
+ };
+
+-static struct seq_operations seq_applstats_ops = {
++static const struct seq_operations seq_applstats_ops = {
+ .start = applications_start,
+ .next = applications_next,
+ .stop = applications_stop,
+@@ -264,7 +264,7 @@ static int capi_driver_show(struct seq_f
+ return 0;
+ }
+
+-static struct seq_operations seq_capi_driver_ops = {
++static const struct seq_operations seq_capi_driver_ops = {
+ .start = capi_driver_start,
+ .next = capi_driver_next,
+ .stop = capi_driver_stop,
+diff -urNp linux-2.6.31.7/drivers/isdn/gigaset/common.c linux-2.6.31.7/drivers/isdn/gigaset/common.c
+--- linux-2.6.31.7/drivers/isdn/gigaset/common.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/isdn/gigaset/common.c 2009-12-08 17:39:43.642743923 -0500
+@@ -665,7 +665,7 @@ struct cardstate *gigaset_initcs(struct
+ cs->commands_pending = 0;
+ cs->cur_at_seq = 0;
+ cs->gotfwver = -1;
+- cs->open_count = 0;
++ atomic_set(&cs->open_count, 0);
+ cs->dev = NULL;
+ cs->tty = NULL;
+ cs->tty_dev = NULL;
+diff -urNp linux-2.6.31.7/drivers/isdn/gigaset/gigaset.h linux-2.6.31.7/drivers/isdn/gigaset/gigaset.h
+--- linux-2.6.31.7/drivers/isdn/gigaset/gigaset.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/isdn/gigaset/gigaset.h 2009-12-08 17:39:43.643740864 -0500
+@@ -446,7 +446,7 @@ struct cardstate {
+ spinlock_t cmdlock;
+ unsigned curlen, cmdbytes;
+
+- unsigned open_count;
++ atomic_t open_count;
+ struct tty_struct *tty;
+ struct tasklet_struct if_wake_tasklet;
+ unsigned control_state;
+diff -urNp linux-2.6.31.7/drivers/isdn/gigaset/interface.c linux-2.6.31.7/drivers/isdn/gigaset/interface.c
+--- linux-2.6.31.7/drivers/isdn/gigaset/interface.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/isdn/gigaset/interface.c 2009-12-08 17:39:43.643740864 -0500
+@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
+ return -ERESTARTSYS; // FIXME -EINTR?
+ tty->driver_data = cs;
+
+- ++cs->open_count;
+-
+- if (cs->open_count == 1) {
++ if (atomic_inc_return(&cs->open_count) == 1) {
+ spin_lock_irqsave(&cs->lock, flags);
+ cs->tty = tty;
+ spin_unlock_irqrestore(&cs->lock, flags);
+@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
+- else if (!cs->open_count)
++ else if (!atomic_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else {
+- if (!--cs->open_count) {
++ if (!atomic_dec_return(&cs->open_count)) {
+ spin_lock_irqsave(&cs->lock, flags);
+ cs->tty = NULL;
+ spin_unlock_irqrestore(&cs->lock, flags);
+@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
+ if (!cs->connected) {
+ gig_dbg(DEBUG_IF, "not connected");
+ retval = -ENODEV;
+- } else if (!cs->open_count)
++ } else if (!atomic_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else {
+ retval = 0;
+@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
+ if (!cs->connected) {
+ gig_dbg(DEBUG_IF, "not connected");
+ retval = -ENODEV;
+- } else if (!cs->open_count)
++ } else if (!atomic_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else if (cs->mstate != MS_LOCKED) {
+ dev_warn(cs->dev, "can't write to unlocked device\n");
+@@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
+ if (!cs->connected) {
+ gig_dbg(DEBUG_IF, "not connected");
+ retval = -ENODEV;
+- } else if (!cs->open_count)
++ } else if (!atomic_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else if (cs->mstate != MS_LOCKED) {
+ dev_warn(cs->dev, "can't write to unlocked device\n");
+@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected");
+- else if (!cs->open_count)
++ else if (!atomic_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else if (cs->mstate != MS_LOCKED)
+ dev_warn(cs->dev, "can't write to unlocked device\n");
+@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
+- else if (!cs->open_count)
++ else if (!atomic_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else {
+ //FIXME
+@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
+- else if (!cs->open_count)
++ else if (!atomic_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else {
+ //FIXME
+@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
+ goto out;
+ }
+
+- if (!cs->open_count) {
++ if (!atomic_read(&cs->open_count)) {
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ goto out;
+ }
+diff -urNp linux-2.6.31.7/drivers/lguest/core.c linux-2.6.31.7/drivers/lguest/core.c
+--- linux-2.6.31.7/drivers/lguest/core.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/lguest/core.c 2009-12-08 17:39:43.643740864 -0500
+@@ -92,9 +92,17 @@ static __init int map_switcher(void)
+ * it's worked so far. The end address needs +1 because __get_vm_area
+ * allocates an extra guard page, so we need space for that.
+ */
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
++ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
++ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
++#else
+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
+ VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
++#endif
++
+ if (!switcher_vma) {
+ err = -ENOMEM;
+ printk("lguest: could not map switcher pages high\n");
+diff -urNp linux-2.6.31.7/drivers/lguest/lguest_user.c linux-2.6.31.7/drivers/lguest/lguest_user.c
+--- linux-2.6.31.7/drivers/lguest/lguest_user.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/lguest/lguest_user.c 2009-12-08 17:39:43.644750627 -0500
+@@ -508,7 +508,7 @@ static int close(struct inode *inode, st
+ * uses: reading and writing a character device called /dev/lguest. All the
+ * work happens in the read(), write() and close() routines:
+ */
+-static struct file_operations lguest_fops = {
++static const struct file_operations lguest_fops = {
+ .owner = THIS_MODULE,
+ .release = close,
+ .write = write,
+diff -urNp linux-2.6.31.7/drivers/macintosh/via-pmu-backlight.c linux-2.6.31.7/drivers/macintosh/via-pmu-backlight.c
+--- linux-2.6.31.7/drivers/macintosh/via-pmu-backlight.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/macintosh/via-pmu-backlight.c 2009-12-08 17:39:43.644750627 -0500
+@@ -15,7 +15,7 @@
+
+ #define MAX_PMU_LEVEL 0xFF
+
+-static struct backlight_ops pmu_backlight_data;
++static const struct backlight_ops pmu_backlight_data;
+ static DEFINE_SPINLOCK(pmu_backlight_lock);
+ static int sleeping, uses_pmu_bl;
+ static u8 bl_curve[FB_BACKLIGHT_LEVELS];
+@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
+ return bd->props.brightness;
+ }
+
+-static struct backlight_ops pmu_backlight_data = {
++static const struct backlight_ops pmu_backlight_data = {
+ .get_brightness = pmu_backlight_get_brightness,
+ .update_status = pmu_backlight_update_status,
+
+diff -urNp linux-2.6.31.7/drivers/macintosh/via-pmu.c linux-2.6.31.7/drivers/macintosh/via-pmu.c
+--- linux-2.6.31.7/drivers/macintosh/via-pmu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/macintosh/via-pmu.c 2009-12-08 17:39:43.653686408 -0500
+@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
+ && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
+ }
+
+-static struct platform_suspend_ops pmu_pm_ops = {
++static const struct platform_suspend_ops pmu_pm_ops = {
+ .enter = powerbook_sleep,
+ .valid = pmu_sleep_valid,
+ };
+diff -urNp linux-2.6.31.7/drivers/md/bitmap.c linux-2.6.31.7/drivers/md/bitmap.c
+--- linux-2.6.31.7/drivers/md/bitmap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/md/bitmap.c 2009-12-08 17:39:43.653686408 -0500
+@@ -58,7 +58,7 @@
+ # if DEBUG > 0
+ # define PRINTK(x...) printk(KERN_DEBUG x)
+ # else
+-# define PRINTK(x...)
++# define PRINTK(x...) do {} while (0)
+ # endif
+ #endif
+
+diff -urNp linux-2.6.31.7/drivers/md/dm-sysfs.c linux-2.6.31.7/drivers/md/dm-sysfs.c
+--- linux-2.6.31.7/drivers/md/dm-sysfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/md/dm-sysfs.c 2009-12-08 17:39:43.654675270 -0500
+@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
+ NULL,
+ };
+
+-static struct sysfs_ops dm_sysfs_ops = {
++static const struct sysfs_ops dm_sysfs_ops = {
+ .show = dm_attr_show,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/md/dm-table.c linux-2.6.31.7/drivers/md/dm-table.c
+--- linux-2.6.31.7/drivers/md/dm-table.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/md/dm-table.c 2009-12-08 17:39:43.654675270 -0500
+@@ -359,7 +359,7 @@ static int device_area_is_invalid(struct
+ if (!dev_size)
+ return 0;
+
+- if ((start >= dev_size) || (start + len > dev_size)) {
++ if ((start >= dev_size) || (len > dev_size - start)) {
+ DMWARN("%s: %s too small for target: "
+ "start=%llu, len=%llu, dev_size=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
+diff -urNp linux-2.6.31.7/drivers/md/md.c linux-2.6.31.7/drivers/md/md.c
+--- linux-2.6.31.7/drivers/md/md.c 2009-12-08 17:29:51.596232240 -0500
++++ linux-2.6.31.7/drivers/md/md.c 2009-12-08 17:39:43.662676226 -0500
+@@ -2474,7 +2474,7 @@ static void rdev_free(struct kobject *ko
+ mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
+ kfree(rdev);
+ }
+-static struct sysfs_ops rdev_sysfs_ops = {
++static const struct sysfs_ops rdev_sysfs_ops = {
+ .show = rdev_attr_show,
+ .store = rdev_attr_store,
+ };
+@@ -3844,7 +3844,7 @@ static void md_free(struct kobject *ko)
+ kfree(mddev);
+ }
+
+-static struct sysfs_ops md_sysfs_ops = {
++static const struct sysfs_ops md_sysfs_ops = {
+ .show = md_attr_show,
+ .store = md_attr_store,
+ };
+@@ -5963,7 +5963,7 @@ static int md_seq_show(struct seq_file *
+ chunk_kb ? "KB" : "B");
+ if (bitmap->file) {
+ seq_printf(seq, ", file: ");
+- seq_path(seq, &bitmap->file->f_path, " \t\n");
++ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
+ }
+
+ seq_printf(seq, "\n");
+@@ -6057,7 +6057,7 @@ static int is_mddev_idle(mddev_t *mddev,
+ struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
+ curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
+ (int)part_stat_read(&disk->part0, sectors[1]) -
+- atomic_read(&disk->sync_io);
++ atomic_read_unchecked(&disk->sync_io);
+ /* sync IO will cause sync_io to increase before the disk_stats
+ * as sync_io is counted when a request starts, and
+ * disk_stats is counted when it completes.
+diff -urNp linux-2.6.31.7/drivers/md/md.h linux-2.6.31.7/drivers/md/md.h
+--- linux-2.6.31.7/drivers/md/md.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/md/md.h 2009-12-08 17:39:43.663747160 -0500
+@@ -303,7 +303,7 @@ static inline void rdev_dec_pending(mdk_
+
+ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
+ {
+- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
++ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
+ }
+
+ struct mdk_personality
+diff -urNp linux-2.6.31.7/drivers/media/dvb/dvb-core/dmxdev.c linux-2.6.31.7/drivers/media/dvb/dvb-core/dmxdev.c
+--- linux-2.6.31.7/drivers/media/dvb/dvb-core/dmxdev.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/dvb/dvb-core/dmxdev.c 2009-12-08 17:39:43.663747160 -0500
+@@ -1086,7 +1086,7 @@ static unsigned int dvb_dvr_poll(struct
+ return mask;
+ }
+
+-static struct file_operations dvb_dvr_fops = {
++static const struct file_operations dvb_dvr_fops = {
+ .owner = THIS_MODULE,
+ .read = dvb_dvr_read,
+ .write = dvb_dvr_write,
+diff -urNp linux-2.6.31.7/drivers/media/dvb/firewire/firedtv-ci.c linux-2.6.31.7/drivers/media/dvb/firewire/firedtv-ci.c
+--- linux-2.6.31.7/drivers/media/dvb/firewire/firedtv-ci.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/dvb/firewire/firedtv-ci.c 2009-12-08 17:39:43.664741742 -0500
+@@ -215,7 +215,7 @@ static unsigned int fdtv_ca_io_poll(stru
+ return POLLIN;
+ }
+
+-static struct file_operations fdtv_ca_fops = {
++static const struct file_operations fdtv_ca_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = dvb_generic_ioctl,
+ .open = dvb_generic_open,
+diff -urNp linux-2.6.31.7/drivers/media/video/cafe_ccic.c linux-2.6.31.7/drivers/media/video/cafe_ccic.c
+--- linux-2.6.31.7/drivers/media/video/cafe_ccic.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/cafe_ccic.c 2009-12-08 17:39:43.665719951 -0500
+@@ -1326,7 +1326,7 @@ static void cafe_v4l_vm_close(struct vm_
+ mutex_unlock(&sbuf->cam->s_mutex);
+ }
+
+-static struct vm_operations_struct cafe_v4l_vm_ops = {
++static const struct vm_operations_struct cafe_v4l_vm_ops = {
+ .open = cafe_v4l_vm_open,
+ .close = cafe_v4l_vm_close
+ };
+diff -urNp linux-2.6.31.7/drivers/media/video/et61x251/et61x251_core.c linux-2.6.31.7/drivers/media/video/et61x251/et61x251_core.c
+--- linux-2.6.31.7/drivers/media/video/et61x251/et61x251_core.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/et61x251/et61x251_core.c 2009-12-08 17:39:43.665719951 -0500
+@@ -1494,7 +1494,7 @@ static void et61x251_vm_close(struct vm_
+ }
+
+
+-static struct vm_operations_struct et61x251_vm_ops = {
++static const struct vm_operations_struct et61x251_vm_ops = {
+ .open = et61x251_vm_open,
+ .close = et61x251_vm_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/media/video/gspca/gspca.c linux-2.6.31.7/drivers/media/video/gspca/gspca.c
+--- linux-2.6.31.7/drivers/media/video/gspca/gspca.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/gspca/gspca.c 2009-12-08 17:39:43.666744425 -0500
+@@ -99,7 +99,7 @@ static void gspca_vm_close(struct vm_are
+ frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_MAPPED;
+ }
+
+-static struct vm_operations_struct gspca_vm_ops = {
++static const struct vm_operations_struct gspca_vm_ops = {
+ .open = gspca_vm_open,
+ .close = gspca_vm_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/media/video/meye.c linux-2.6.31.7/drivers/media/video/meye.c
+--- linux-2.6.31.7/drivers/media/video/meye.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/meye.c 2009-12-08 17:39:43.666744425 -0500
+@@ -1589,7 +1589,7 @@ static void meye_vm_close(struct vm_area
+ meye.vma_use_count[idx]--;
+ }
+
+-static struct vm_operations_struct meye_vm_ops = {
++static const struct vm_operations_struct meye_vm_ops = {
+ .open = meye_vm_open,
+ .close = meye_vm_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/media/video/sn9c102/sn9c102_core.c linux-2.6.31.7/drivers/media/video/sn9c102/sn9c102_core.c
+--- linux-2.6.31.7/drivers/media/video/sn9c102/sn9c102_core.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/sn9c102/sn9c102_core.c 2009-12-08 17:39:43.667767481 -0500
+@@ -2075,7 +2075,7 @@ static void sn9c102_vm_close(struct vm_a
+ }
+
+
+-static struct vm_operations_struct sn9c102_vm_ops = {
++static const struct vm_operations_struct sn9c102_vm_ops = {
+ .open = sn9c102_vm_open,
+ .close = sn9c102_vm_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/media/video/stk-webcam.c linux-2.6.31.7/drivers/media/video/stk-webcam.c
+--- linux-2.6.31.7/drivers/media/video/stk-webcam.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/stk-webcam.c 2009-12-08 17:39:43.668743303 -0500
+@@ -790,7 +790,7 @@ static void stk_v4l_vm_close(struct vm_a
+ if (sbuf->mapcount == 0)
+ sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED;
+ }
+-static struct vm_operations_struct stk_v4l_vm_ops = {
++static const struct vm_operations_struct stk_v4l_vm_ops = {
+ .open = stk_v4l_vm_open,
+ .close = stk_v4l_vm_close
+ };
+diff -urNp linux-2.6.31.7/drivers/media/video/usbvideo/konicawc.c linux-2.6.31.7/drivers/media/video/usbvideo/konicawc.c
+--- linux-2.6.31.7/drivers/media/video/usbvideo/konicawc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/usbvideo/konicawc.c 2009-12-08 17:39:43.668743303 -0500
+@@ -225,7 +225,7 @@ static void konicawc_register_input(stru
+ int error;
+
+ usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
+- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
++ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
+
+ cam->input = input_dev = input_allocate_device();
+ if (!input_dev) {
+diff -urNp linux-2.6.31.7/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.31.7/drivers/media/video/usbvideo/quickcam_messenger.c
+--- linux-2.6.31.7/drivers/media/video/usbvideo/quickcam_messenger.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/usbvideo/quickcam_messenger.c 2009-12-08 17:39:43.668743303 -0500
+@@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
+ int error;
+
+ usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
+- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
++ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
+
+ cam->input = input_dev = input_allocate_device();
+ if (!input_dev) {
+diff -urNp linux-2.6.31.7/drivers/media/video/uvc/uvc_v4l2.c linux-2.6.31.7/drivers/media/video/uvc/uvc_v4l2.c
+--- linux-2.6.31.7/drivers/media/video/uvc/uvc_v4l2.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/uvc/uvc_v4l2.c 2009-12-08 17:39:43.676598452 -0500
+@@ -1063,7 +1063,7 @@ static void uvc_vm_close(struct vm_area_
+ buffer->vma_use_count--;
+ }
+
+-static struct vm_operations_struct uvc_vm_ops = {
++static const struct vm_operations_struct uvc_vm_ops = {
+ .open = uvc_vm_open,
+ .close = uvc_vm_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/media/video/videobuf-dma-contig.c linux-2.6.31.7/drivers/media/video/videobuf-dma-contig.c
+--- linux-2.6.31.7/drivers/media/video/videobuf-dma-contig.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/videobuf-dma-contig.c 2009-12-08 17:39:43.676598452 -0500
+@@ -105,7 +105,7 @@ static void videobuf_vm_close(struct vm_
+ }
+ }
+
+-static struct vm_operations_struct videobuf_vm_ops = {
++static const struct vm_operations_struct videobuf_vm_ops = {
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/media/video/vino.c linux-2.6.31.7/drivers/media/video/vino.c
+--- linux-2.6.31.7/drivers/media/video/vino.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/vino.c 2009-12-08 17:39:43.677748182 -0500
+@@ -3858,7 +3858,7 @@ static void vino_vm_close(struct vm_area
+ dprintk("vino_vm_close(): count = %d\n", fb->map_count);
+ }
+
+-static struct vm_operations_struct vino_vm_ops = {
++static const struct vm_operations_struct vino_vm_ops = {
+ .open = vino_vm_open,
+ .close = vino_vm_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/media/video/zc0301/zc0301_core.c linux-2.6.31.7/drivers/media/video/zc0301/zc0301_core.c
+--- linux-2.6.31.7/drivers/media/video/zc0301/zc0301_core.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/zc0301/zc0301_core.c 2009-12-08 17:39:43.678744209 -0500
+@@ -933,7 +933,7 @@ static void zc0301_vm_close(struct vm_ar
+ }
+
+
+-static struct vm_operations_struct zc0301_vm_ops = {
++static const struct vm_operations_struct zc0301_vm_ops = {
+ .open = zc0301_vm_open,
+ .close = zc0301_vm_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/media/video/zoran/zoran_driver.c linux-2.6.31.7/drivers/media/video/zoran/zoran_driver.c
+--- linux-2.6.31.7/drivers/media/video/zoran/zoran_driver.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/media/video/zoran/zoran_driver.c 2009-12-08 17:39:43.695745820 -0500
+@@ -3172,7 +3172,7 @@ zoran_vm_close (struct vm_area_struct *v
+ mutex_unlock(&zr->resource_lock);
+ }
+
+-static struct vm_operations_struct zoran_vm_ops = {
++static const struct vm_operations_struct zoran_vm_ops = {
+ .open = zoran_vm_open,
+ .close = zoran_vm_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/message/i2o/i2o_proc.c linux-2.6.31.7/drivers/message/i2o/i2o_proc.c
+--- linux-2.6.31.7/drivers/message/i2o/i2o_proc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/message/i2o/i2o_proc.c 2009-12-08 17:39:43.705592720 -0500
+@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
+ "Array Controller Device"
+ };
+
+-static char *chtostr(u8 * chars, int n)
+-{
+- char tmp[256];
+- tmp[0] = 0;
+- return strncat(tmp, (char *)chars, n);
+-}
+-
+ static int i2o_report_query_status(struct seq_file *seq, int block_status,
+ char *group)
+ {
+@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
+
+ seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
+ seq_printf(seq, "%-#8x", ddm_table.module_id);
+- seq_printf(seq, "%-29s",
+- chtostr(ddm_table.module_name_version, 28));
++ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
+ seq_printf(seq, "%9d ", ddm_table.data_size);
+ seq_printf(seq, "%8d", ddm_table.code_size);
+
+@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
+
+ seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
+ seq_printf(seq, "%-#8x", dst->module_id);
+- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
+- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
++ seq_printf(seq, "%-.28s", dst->module_name_version);
++ seq_printf(seq, "%-.8s", dst->date);
+ seq_printf(seq, "%8d ", dst->module_size);
+ seq_printf(seq, "%8d ", dst->mpb_size);
+ seq_printf(seq, "0x%04x", dst->module_flags);
+@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
+ seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
+ seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
+ seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
+- seq_printf(seq, "Vendor info : %s\n",
+- chtostr((u8 *) (work32 + 2), 16));
+- seq_printf(seq, "Product info : %s\n",
+- chtostr((u8 *) (work32 + 6), 16));
+- seq_printf(seq, "Description : %s\n",
+- chtostr((u8 *) (work32 + 10), 16));
+- seq_printf(seq, "Product rev. : %s\n",
+- chtostr((u8 *) (work32 + 14), 8));
++ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
++ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
++ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
++ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
+
+ seq_printf(seq, "Serial number : ");
+ print_serial_number(seq, (u8 *) (work32 + 16),
+@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
+ }
+
+ seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
+- seq_printf(seq, "Module name : %s\n",
+- chtostr(result.module_name, 24));
+- seq_printf(seq, "Module revision : %s\n",
+- chtostr(result.module_rev, 8));
++ seq_printf(seq, "Module name : %.24s\n", result.module_name);
++ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
+
+ seq_printf(seq, "Serial number : ");
+ print_serial_number(seq, result.serial_number, sizeof(result) - 36);
+@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
+ return 0;
+ }
+
+- seq_printf(seq, "Device name : %s\n",
+- chtostr(result.device_name, 64));
+- seq_printf(seq, "Service name : %s\n",
+- chtostr(result.service_name, 64));
+- seq_printf(seq, "Physical name : %s\n",
+- chtostr(result.physical_location, 64));
+- seq_printf(seq, "Instance number : %s\n",
+- chtostr(result.instance_number, 4));
++ seq_printf(seq, "Device name : %.64s\n", result.device_name);
++ seq_printf(seq, "Service name : %.64s\n", result.service_name);
++ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
++ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
+
+ return 0;
+ }
+diff -urNp linux-2.6.31.7/drivers/mfd/ab3100-core.c linux-2.6.31.7/drivers/mfd/ab3100-core.c
+--- linux-2.6.31.7/drivers/mfd/ab3100-core.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/mfd/ab3100-core.c 2009-12-08 17:39:43.708599450 -0500
+@@ -465,7 +465,7 @@ static int ab3100_get_set_reg_open_file(
+ return 0;
+ }
+
+-static int ab3100_get_set_reg(struct file *file,
++static ssize_t ab3100_get_set_reg(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+ {
+diff -urNp linux-2.6.31.7/drivers/misc/ibmasm/ibmasmfs.c linux-2.6.31.7/drivers/misc/ibmasm/ibmasmfs.c
+--- linux-2.6.31.7/drivers/misc/ibmasm/ibmasmfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/misc/ibmasm/ibmasmfs.c 2009-12-08 17:39:43.708599450 -0500
+@@ -97,7 +97,7 @@ static int ibmasmfs_get_super(struct fil
+ return get_sb_single(fst, flags, data, ibmasmfs_fill_super, mnt);
+ }
+
+-static struct super_operations ibmasmfs_s_ops = {
++static const struct super_operations ibmasmfs_s_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+ };
+diff -urNp linux-2.6.31.7/drivers/misc/kgdbts.c linux-2.6.31.7/drivers/misc/kgdbts.c
+--- linux-2.6.31.7/drivers/misc/kgdbts.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/misc/kgdbts.c 2009-12-08 17:39:43.713820702 -0500
+@@ -118,7 +118,7 @@
+ } while (0)
+ #define MAX_CONFIG_LEN 40
+
+-static struct kgdb_io kgdbts_io_ops;
++static const struct kgdb_io kgdbts_io_ops;
+ static char get_buf[BUFMAX];
+ static int get_buf_cnt;
+ static char put_buf[BUFMAX];
+@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
+ module_put(THIS_MODULE);
+ }
+
+-static struct kgdb_io kgdbts_io_ops = {
++static const struct kgdb_io kgdbts_io_ops = {
+ .name = "kgdbts",
+ .read_char = kgdbts_get_char,
+ .write_char = kgdbts_put_char,
+diff -urNp linux-2.6.31.7/drivers/misc/phantom.c linux-2.6.31.7/drivers/misc/phantom.c
+--- linux-2.6.31.7/drivers/misc/phantom.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/misc/phantom.c 2009-12-08 17:39:43.713820702 -0500
+@@ -271,7 +271,7 @@ static unsigned int phantom_poll(struct
+ return mask;
+ }
+
+-static struct file_operations phantom_file_ops = {
++static const struct file_operations phantom_file_ops = {
+ .open = phantom_open,
+ .release = phantom_release,
+ .unlocked_ioctl = phantom_ioctl,
+diff -urNp linux-2.6.31.7/drivers/misc/sgi-gru/grufile.c linux-2.6.31.7/drivers/misc/sgi-gru/grufile.c
+--- linux-2.6.31.7/drivers/misc/sgi-gru/grufile.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/misc/sgi-gru/grufile.c 2009-12-08 17:39:43.714750990 -0500
+@@ -53,7 +53,7 @@ struct gru_stats_s gru_stats;
+ /* Guaranteed user available resources on each node */
+ static int max_user_cbrs, max_user_dsr_bytes;
+
+-static struct file_operations gru_fops;
++static const struct file_operations gru_fops;
+ static struct miscdevice gru_miscdev;
+
+
+@@ -426,7 +426,7 @@ static void __exit gru_exit(void)
+ gru_proc_exit();
+ }
+
+-static struct file_operations gru_fops = {
++static const struct file_operations gru_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = gru_file_unlocked_ioctl,
+ .mmap = gru_file_mmap,
+@@ -438,7 +438,7 @@ static struct miscdevice gru_miscdev = {
+ .fops = &gru_fops,
+ };
+
+-struct vm_operations_struct gru_vm_ops = {
++const struct vm_operations_struct gru_vm_ops = {
+ .close = gru_vma_close,
+ .fault = gru_fault,
+ };
+diff -urNp linux-2.6.31.7/drivers/misc/sgi-gru/grutables.h linux-2.6.31.7/drivers/misc/sgi-gru/grutables.h
+--- linux-2.6.31.7/drivers/misc/sgi-gru/grutables.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/misc/sgi-gru/grutables.h 2009-12-08 17:39:43.714750990 -0500
+@@ -624,7 +624,7 @@ static inline int is_kernel_context(stru
+ */
+ struct gru_unload_context_req;
+
+-extern struct vm_operations_struct gru_vm_ops;
++extern const struct vm_operations_struct gru_vm_ops;
+ extern struct device *grudev;
+
+ extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma,
+diff -urNp linux-2.6.31.7/drivers/mmc/core/debugfs.c linux-2.6.31.7/drivers/mmc/core/debugfs.c
+--- linux-2.6.31.7/drivers/mmc/core/debugfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/mmc/core/debugfs.c 2009-12-08 17:39:43.714750990 -0500
+@@ -240,7 +240,7 @@ static int mmc_ext_csd_release(struct in
+ return 0;
+ }
+
+-static struct file_operations mmc_dbg_ext_csd_fops = {
++static const struct file_operations mmc_dbg_ext_csd_fops = {
+ .open = mmc_ext_csd_open,
+ .read = mmc_ext_csd_read,
+ .release = mmc_ext_csd_release,
+diff -urNp linux-2.6.31.7/drivers/mtd/devices/doc2000.c linux-2.6.31.7/drivers/mtd/devices/doc2000.c
+--- linux-2.6.31.7/drivers/mtd/devices/doc2000.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/mtd/devices/doc2000.c 2009-12-08 17:39:43.714750990 -0500
+@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
+
+ /* The ECC will not be calculated correctly if less than 512 is written */
+ /* DBB-
+- if (len != 0x200 && eccbuf)
++ if (len != 0x200)
+ printk(KERN_WARNING
+ "ECC needs a full sector write (adr: %lx size %lx)\n",
+ (long) to, (long) len);
+diff -urNp linux-2.6.31.7/drivers/mtd/devices/doc2001.c linux-2.6.31.7/drivers/mtd/devices/doc2001.c
+--- linux-2.6.31.7/drivers/mtd/devices/doc2001.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/mtd/devices/doc2001.c 2009-12-08 17:39:43.719757573 -0500
+@@ -395,6 +395,8 @@ static int doc_read (struct mtd_info *mt
+ /* Don't allow read past end of device */
+ if (from >= this->totlen)
+ return -EINVAL;
++ if (!len)
++ return -EINVAL;
+
+ /* Don't allow a single read to cross a 512-byte block boundary */
+ if (from + len > ((from | 0x1ff) + 1))
+diff -urNp linux-2.6.31.7/drivers/mtd/ubi/build.c linux-2.6.31.7/drivers/mtd/ubi/build.c
+--- linux-2.6.31.7/drivers/mtd/ubi/build.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/mtd/ubi/build.c 2009-12-08 17:39:43.720749626 -0500
+@@ -1257,7 +1257,7 @@ static int __init bytes_str_to_int(const
+ unsigned long result;
+
+ result = simple_strtoul(str, &endp, 0);
+- if (str == endp || result < 0) {
++ if (str == endp) {
+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
+ str);
+ return -EINVAL;
+diff -urNp linux-2.6.31.7/drivers/net/e1000e/82571.c linux-2.6.31.7/drivers/net/e1000e/82571.c
+--- linux-2.6.31.7/drivers/net/e1000e/82571.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/e1000e/82571.c 2009-12-08 17:39:43.736608648 -0500
+@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
+ temp = er32(ICRXDMTC);
+ }
+
+-static struct e1000_mac_operations e82571_mac_ops = {
++static const struct e1000_mac_operations e82571_mac_ops = {
+ /* .check_mng_mode: mac type dependent */
+ /* .check_for_link: media type dependent */
+ .id_led_init = e1000e_id_led_init,
+@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
+ .setup_led = e1000e_setup_led_generic,
+ };
+
+-static struct e1000_phy_operations e82_phy_ops_igp = {
++static const struct e1000_phy_operations e82_phy_ops_igp = {
+ .acquire_phy = e1000_get_hw_semaphore_82571,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit_phy = NULL,
+@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
+ .cfg_on_link_up = NULL,
+ };
+
+-static struct e1000_phy_operations e82_phy_ops_m88 = {
++static const struct e1000_phy_operations e82_phy_ops_m88 = {
+ .acquire_phy = e1000_get_hw_semaphore_82571,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit_phy = e1000e_phy_sw_reset,
+@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
+ .cfg_on_link_up = NULL,
+ };
+
+-static struct e1000_phy_operations e82_phy_ops_bm = {
++static const struct e1000_phy_operations e82_phy_ops_bm = {
+ .acquire_phy = e1000_get_hw_semaphore_82571,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit_phy = e1000e_phy_sw_reset,
+@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
+ .cfg_on_link_up = NULL,
+ };
+
+-static struct e1000_nvm_operations e82571_nvm_ops = {
++static const struct e1000_nvm_operations e82571_nvm_ops = {
+ .acquire_nvm = e1000_acquire_nvm_82571,
+ .read_nvm = e1000e_read_nvm_eerd,
+ .release_nvm = e1000_release_nvm_82571,
+diff -urNp linux-2.6.31.7/drivers/net/e1000e/e1000.h linux-2.6.31.7/drivers/net/e1000e/e1000.h
+--- linux-2.6.31.7/drivers/net/e1000e/e1000.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/e1000e/e1000.h 2009-12-08 17:39:43.737755216 -0500
+@@ -359,9 +359,9 @@ struct e1000_info {
+ u32 pba;
+ u32 max_hw_frame_size;
+ s32 (*get_variants)(struct e1000_adapter *);
+- struct e1000_mac_operations *mac_ops;
+- struct e1000_phy_operations *phy_ops;
+- struct e1000_nvm_operations *nvm_ops;
++ const struct e1000_mac_operations *mac_ops;
++ const struct e1000_phy_operations *phy_ops;
++ const struct e1000_nvm_operations *nvm_ops;
+ };
+
+ /* hardware capability, feature, and workaround flags */
+diff -urNp linux-2.6.31.7/drivers/net/e1000e/es2lan.c linux-2.6.31.7/drivers/net/e1000e/es2lan.c
+--- linux-2.6.31.7/drivers/net/e1000e/es2lan.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/e1000e/es2lan.c 2009-12-08 17:39:43.738748690 -0500
+@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
+ temp = er32(ICRXDMTC);
+ }
+
+-static struct e1000_mac_operations es2_mac_ops = {
++static const struct e1000_mac_operations es2_mac_ops = {
+ .id_led_init = e1000e_id_led_init,
+ .check_mng_mode = e1000e_check_mng_mode_generic,
+ /* check_for_link dependent on media type */
+@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
+ .setup_led = e1000e_setup_led_generic,
+ };
+
+-static struct e1000_phy_operations es2_phy_ops = {
++static const struct e1000_phy_operations es2_phy_ops = {
+ .acquire_phy = e1000_acquire_phy_80003es2lan,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit_phy = e1000e_phy_sw_reset,
+@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
+ .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
+ };
+
+-static struct e1000_nvm_operations es2_nvm_ops = {
++static const struct e1000_nvm_operations es2_nvm_ops = {
+ .acquire_nvm = e1000_acquire_nvm_80003es2lan,
+ .read_nvm = e1000e_read_nvm_eerd,
+ .release_nvm = e1000_release_nvm_80003es2lan,
+diff -urNp linux-2.6.31.7/drivers/net/e1000e/hw.h linux-2.6.31.7/drivers/net/e1000e/hw.h
+--- linux-2.6.31.7/drivers/net/e1000e/hw.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/e1000e/hw.h 2009-12-08 17:39:43.740756941 -0500
+@@ -774,13 +774,13 @@ struct e1000_phy_operations {
+
+ /* Function pointers for the NVM. */
+ struct e1000_nvm_operations {
+- s32 (*acquire_nvm)(struct e1000_hw *);
+- s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
+- void (*release_nvm)(struct e1000_hw *);
+- s32 (*update_nvm)(struct e1000_hw *);
+- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+- s32 (*validate_nvm)(struct e1000_hw *);
+- s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
++ s32 (* const acquire_nvm)(struct e1000_hw *);
++ s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
++ void (* const release_nvm)(struct e1000_hw *);
++ s32 (* const update_nvm)(struct e1000_hw *);
++ s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
++ s32 (* const validate_nvm)(struct e1000_hw *);
++ s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
+ };
+
+ struct e1000_mac_info {
+diff -urNp linux-2.6.31.7/drivers/net/e1000e/ich8lan.c linux-2.6.31.7/drivers/net/e1000e/ich8lan.c
+--- linux-2.6.31.7/drivers/net/e1000e/ich8lan.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/e1000e/ich8lan.c 2009-12-08 17:39:43.753660936 -0500
+@@ -3078,7 +3078,7 @@ static void e1000_clear_hw_cntrs_ich8lan
+ }
+ }
+
+-static struct e1000_mac_operations ich8_mac_ops = {
++static const struct e1000_mac_operations ich8_mac_ops = {
+ .id_led_init = e1000e_id_led_init,
+ .check_mng_mode = e1000_check_mng_mode_ich8lan,
+ .check_for_link = e1000_check_for_copper_link_ich8lan,
+@@ -3096,7 +3096,7 @@ static struct e1000_mac_operations ich8_
+ /* id_led_init dependent on mac type */
+ };
+
+-static struct e1000_phy_operations ich8_phy_ops = {
++static const struct e1000_phy_operations ich8_phy_ops = {
+ .acquire_phy = e1000_acquire_swflag_ich8lan,
+ .check_reset_block = e1000_check_reset_block_ich8lan,
+ .commit_phy = NULL,
+@@ -3112,7 +3112,7 @@ static struct e1000_phy_operations ich8_
+ .write_phy_reg = e1000e_write_phy_reg_igp,
+ };
+
+-static struct e1000_nvm_operations ich8_nvm_ops = {
++static const struct e1000_nvm_operations ich8_nvm_ops = {
+ .acquire_nvm = e1000_acquire_swflag_ich8lan,
+ .read_nvm = e1000_read_nvm_ich8lan,
+ .release_nvm = e1000_release_swflag_ich8lan,
+diff -urNp linux-2.6.31.7/drivers/net/ibmveth.c linux-2.6.31.7/drivers/net/ibmveth.c
+--- linux-2.6.31.7/drivers/net/ibmveth.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/ibmveth.c 2009-12-08 17:39:43.763091533 -0500
+@@ -1576,7 +1576,7 @@ static struct attribute * veth_pool_attr
+ NULL,
+ };
+
+-static struct sysfs_ops veth_pool_ops = {
++static const struct sysfs_ops veth_pool_ops = {
+ .show = veth_pool_show,
+ .store = veth_pool_store,
+ };
+diff -urNp linux-2.6.31.7/drivers/net/igb/e1000_82575.c linux-2.6.31.7/drivers/net/igb/e1000_82575.c
+--- linux-2.6.31.7/drivers/net/igb/e1000_82575.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/igb/e1000_82575.c 2009-12-08 17:39:43.777822806 -0500
+@@ -1461,7 +1461,7 @@ void igb_vmdq_set_replication_pf(struct
+ wr32(E1000_VT_CTL, vt_ctl);
+ }
+
+-static struct e1000_mac_operations e1000_mac_ops_82575 = {
++static const struct e1000_mac_operations e1000_mac_ops_82575 = {
+ .reset_hw = igb_reset_hw_82575,
+ .init_hw = igb_init_hw_82575,
+ .check_for_link = igb_check_for_link_82575,
+@@ -1470,13 +1470,13 @@ static struct e1000_mac_operations e1000
+ .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
+ };
+
+-static struct e1000_phy_operations e1000_phy_ops_82575 = {
++static const struct e1000_phy_operations e1000_phy_ops_82575 = {
+ .acquire = igb_acquire_phy_82575,
+ .get_cfg_done = igb_get_cfg_done_82575,
+ .release = igb_release_phy_82575,
+ };
+
+-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
++static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
+ .acquire = igb_acquire_nvm_82575,
+ .read = igb_read_nvm_eerd,
+ .release = igb_release_nvm_82575,
+diff -urNp linux-2.6.31.7/drivers/net/igb/e1000_hw.h linux-2.6.31.7/drivers/net/igb/e1000_hw.h
+--- linux-2.6.31.7/drivers/net/igb/e1000_hw.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/igb/e1000_hw.h 2009-12-08 17:39:43.785560946 -0500
+@@ -302,17 +302,17 @@ struct e1000_phy_operations {
+ };
+
+ struct e1000_nvm_operations {
+- s32 (*acquire)(struct e1000_hw *);
+- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+- void (*release)(struct e1000_hw *);
+- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
++ s32 (* const acquire)(struct e1000_hw *);
++ s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
++ void (* const release)(struct e1000_hw *);
++ s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
+ };
+
+ struct e1000_info {
+ s32 (*get_invariants)(struct e1000_hw *);
+- struct e1000_mac_operations *mac_ops;
+- struct e1000_phy_operations *phy_ops;
+- struct e1000_nvm_operations *nvm_ops;
++ const struct e1000_mac_operations *mac_ops;
++ const struct e1000_phy_operations *phy_ops;
++ const struct e1000_nvm_operations *nvm_ops;
+ };
+
+ extern const struct e1000_info e1000_82575_info;
+diff -urNp linux-2.6.31.7/drivers/net/irda/vlsi_ir.c linux-2.6.31.7/drivers/net/irda/vlsi_ir.c
+--- linux-2.6.31.7/drivers/net/irda/vlsi_ir.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/irda/vlsi_ir.c 2009-12-08 17:39:43.795764259 -0500
+@@ -906,13 +906,12 @@ static int vlsi_hard_start_xmit(struct s
+ /* no race - tx-ring already empty */
+ vlsi_set_baud(idev, iobase);
+ netif_wake_queue(ndev);
+- }
+- else
+- ;
++ } else {
+ /* keep the speed change pending like it would
+ * for any len>0 packet. tx completion interrupt
+ * will apply it when the tx ring becomes empty.
+ */
++ }
+ spin_unlock_irqrestore(&idev->lock, flags);
+ dev_kfree_skb_any(skb);
+ return 0;
+diff -urNp linux-2.6.31.7/drivers/net/iseries_veth.c linux-2.6.31.7/drivers/net/iseries_veth.c
+--- linux-2.6.31.7/drivers/net/iseries_veth.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/iseries_veth.c 2009-12-08 17:39:43.807304400 -0500
+@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
+ NULL
+ };
+
+-static struct sysfs_ops veth_cnx_sysfs_ops = {
++static const struct sysfs_ops veth_cnx_sysfs_ops = {
+ .show = veth_cnx_attribute_show
+ };
+
+@@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
+ NULL
+ };
+
+-static struct sysfs_ops veth_port_sysfs_ops = {
++static const struct sysfs_ops veth_port_sysfs_ops = {
+ .show = veth_port_attribute_show
+ };
+
+diff -urNp linux-2.6.31.7/drivers/net/pcnet32.c linux-2.6.31.7/drivers/net/pcnet32.c
+--- linux-2.6.31.7/drivers/net/pcnet32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/pcnet32.c 2009-12-08 17:39:43.807764898 -0500
+@@ -78,7 +78,7 @@ static int cards_found;
+ /*
+ * VLB I/O addresses
+ */
+-static unsigned int pcnet32_portlist[] __initdata =
++static unsigned int pcnet32_portlist[] __devinitdata =
+ { 0x300, 0x320, 0x340, 0x360, 0 };
+
+ static int pcnet32_debug = 0;
+diff -urNp linux-2.6.31.7/drivers/net/tg3.h linux-2.6.31.7/drivers/net/tg3.h
+--- linux-2.6.31.7/drivers/net/tg3.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/tg3.h 2009-12-08 17:39:43.808700531 -0500
+@@ -89,6 +89,7 @@
+ #define CHIPREV_ID_5750_A0 0x4000
+ #define CHIPREV_ID_5750_A1 0x4001
+ #define CHIPREV_ID_5750_A3 0x4003
++#define CHIPREV_ID_5750_C1 0x4201
+ #define CHIPREV_ID_5750_C2 0x4202
+ #define CHIPREV_ID_5752_A0_HW 0x5000
+ #define CHIPREV_ID_5752_A0 0x6000
+diff -urNp linux-2.6.31.7/drivers/net/usb/hso.c linux-2.6.31.7/drivers/net/usb/hso.c
+--- linux-2.6.31.7/drivers/net/usb/hso.c 2009-12-08 17:29:51.609654313 -0500
++++ linux-2.6.31.7/drivers/net/usb/hso.c 2009-12-08 17:44:13.554087048 -0500
+@@ -258,7 +258,7 @@ struct hso_serial {
+
+ /* from usb_serial_port */
+ struct tty_struct *tty;
+- int open_count;
++ atomic_t open_count;
+ spinlock_t serial_lock;
+
+ int (*write_data) (struct hso_serial *serial);
+@@ -1179,7 +1179,7 @@ static void put_rxbuf_data_and_resubmit_
+ struct urb *urb;
+
+ urb = serial->rx_urb[0];
+- if (serial->open_count > 0) {
++ if (atomic_read(&serial->open_count) > 0) {
+ count = put_rxbuf_data(urb, serial);
+ if (count == -1)
+ return;
+@@ -1215,7 +1215,7 @@ static void hso_std_serial_read_bulk_cal
+ DUMP1(urb->transfer_buffer, urb->actual_length);
+
+ /* Anyone listening? */
+- if (serial->open_count == 0)
++ if (atomic_read(&serial->open_count) == 0)
+ return;
+
+ if (status == 0) {
+@@ -1310,8 +1310,7 @@ static int hso_serial_open(struct tty_st
+ spin_unlock_irq(&serial->serial_lock);
+
+ /* check for port already opened, if not set the termios */
+- serial->open_count++;
+- if (serial->open_count == 1) {
++ if (atomic_inc_return(&serial->open_count) == 1) {
+ tty->low_latency = 1;
+ serial->rx_state = RX_IDLE;
+ /* Force default termio settings */
+@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
+ result = hso_start_serial_device(serial->parent, GFP_KERNEL);
+ if (result) {
+ hso_stop_serial_device(serial->parent);
+- serial->open_count--;
++ atomic_dec(&serial->open_count);
+ kref_put(&serial->parent->ref, hso_serial_ref_free);
+ }
+ } else {
+@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
+
+ /* reset the rts and dtr */
+ /* do the actual close */
+- serial->open_count--;
++ atomic_dec(&serial->open_count);
+
+- if (serial->open_count <= 0) {
+- serial->open_count = 0;
++ if (atomic_read(&serial->open_count) <= 0) {
++ atomic_set(&serial->open_count, 0);
+ spin_lock_irq(&serial->serial_lock);
+ if (serial->tty == tty) {
+ serial->tty->driver_data = NULL;
+@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
+
+ /* the actual setup */
+ spin_lock_irqsave(&serial->serial_lock, flags);
+- if (serial->open_count)
++ if (atomic_read(&serial->open_count))
+ _hso_serial_set_termios(tty, old);
+ else
+ tty->termios = old;
+@@ -3089,7 +3088,7 @@ static int hso_resume(struct usb_interfa
+ /* Start all serial ports */
+ for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
+ if (serial_table[i] && (serial_table[i]->interface == iface)) {
+- if (dev2ser(serial_table[i])->open_count) {
++ if (atomic_read(&dev2ser(serial_table[i])->open_count)) {
+ result =
+ hso_start_serial_device(serial_table[i], GFP_NOIO);
+ hso_kick_transmit(dev2ser(serial_table[i]));
+diff -urNp linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-3945.c
+--- linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-3945.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-3945.c 2009-12-08 17:39:43.828776350 -0500
+@@ -2867,7 +2867,7 @@ static struct iwl_hcmd_utils_ops iwl3945
+ .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
+ };
+
+-static struct iwl_ops iwl3945_ops = {
++static const struct iwl_ops iwl3945_ops = {
+ .ucode = &iwl3945_ucode,
+ .lib = &iwl3945_lib,
+ .hcmd = &iwl3945_hcmd,
+diff -urNp linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-4965.c
+--- linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-4965.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-4965.c 2009-12-08 17:39:43.829767011 -0500
+@@ -2324,7 +2324,7 @@ static struct iwl_lib_ops iwl4965_lib =
+ },
+ };
+
+-static struct iwl_ops iwl4965_ops = {
++static const struct iwl_ops iwl4965_ops = {
+ .ucode = &iwl4965_ucode,
+ .lib = &iwl4965_lib,
+ .hcmd = &iwl4965_hcmd,
+diff -urNp linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-5000.c
+--- linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-5000.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-5000.c 2009-12-08 17:39:43.844766023 -0500
+@@ -1597,14 +1597,14 @@ static struct iwl_lib_ops iwl5150_lib =
+ },
+ };
+
+-struct iwl_ops iwl5000_ops = {
++const struct iwl_ops iwl5000_ops = {
+ .ucode = &iwl5000_ucode,
+ .lib = &iwl5000_lib,
+ .hcmd = &iwl5000_hcmd,
+ .utils = &iwl5000_hcmd_utils,
+ };
+
+-static struct iwl_ops iwl5150_ops = {
++static const struct iwl_ops iwl5150_ops = {
+ .ucode = &iwl5000_ucode,
+ .lib = &iwl5150_lib,
+ .hcmd = &iwl5000_hcmd,
+diff -urNp linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-6000.c
+--- linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-6000.c 2009-12-08 17:29:51.611669459 -0500
++++ linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-6000.c 2009-12-08 17:39:43.850774242 -0500
+@@ -68,7 +68,7 @@ static struct iwl_hcmd_utils_ops iwl6000
+ .calc_rssi = iwl5000_calc_rssi,
+ };
+
+-static struct iwl_ops iwl6000_ops = {
++static const struct iwl_ops iwl6000_ops = {
+ .ucode = &iwl5000_ucode,
+ .lib = &iwl5000_lib,
+ .hcmd = &iwl5000_hcmd,
+diff -urNp linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-dev.h
+--- linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-dev.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/net/wireless/iwlwifi/iwl-dev.h 2009-12-08 17:39:43.865575308 -0500
+@@ -65,7 +65,7 @@ extern struct iwl_cfg iwl1000_bgn_cfg;
+
+ /* shared structures from iwl-5000.c */
+ extern struct iwl_mod_params iwl50_mod_params;
+-extern struct iwl_ops iwl5000_ops;
++extern const struct iwl_ops iwl5000_ops;
+ extern struct iwl_ucode_ops iwl5000_ucode;
+ extern struct iwl_lib_ops iwl5000_lib;
+ extern struct iwl_hcmd_ops iwl5000_hcmd;
+diff -urNp linux-2.6.31.7/drivers/oprofile/buffer_sync.c linux-2.6.31.7/drivers/oprofile/buffer_sync.c
+--- linux-2.6.31.7/drivers/oprofile/buffer_sync.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/oprofile/buffer_sync.c 2009-12-08 17:39:43.865575308 -0500
+@@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
+ if (cookie == NO_COOKIE)
+ offset = pc;
+ if (cookie == INVALID_COOKIE) {
+- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
+ offset = pc;
+ }
+ if (cookie != last_cookie) {
+@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
+ /* add userspace sample */
+
+ if (!mm) {
+- atomic_inc(&oprofile_stats.sample_lost_no_mm);
++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
+ return 0;
+ }
+
+ cookie = lookup_dcookie(mm, s->eip, &offset);
+
+ if (cookie == INVALID_COOKIE) {
+- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
+ return 0;
+ }
+
+@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
+ /* ignore backtraces if failed to add a sample */
+ if (state == sb_bt_start) {
+ state = sb_bt_ignore;
+- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
+ }
+ }
+ release_mm(mm);
+diff -urNp linux-2.6.31.7/drivers/oprofile/event_buffer.c linux-2.6.31.7/drivers/oprofile/event_buffer.c
+--- linux-2.6.31.7/drivers/oprofile/event_buffer.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/oprofile/event_buffer.c 2009-12-08 17:39:43.865575308 -0500
+@@ -42,7 +42,7 @@ static atomic_t buffer_ready = ATOMIC_IN
+ void add_event_entry(unsigned long value)
+ {
+ if (buffer_pos == buffer_size) {
+- atomic_inc(&oprofile_stats.event_lost_overflow);
++ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
+ return;
+ }
+
+diff -urNp linux-2.6.31.7/drivers/oprofile/oprofilefs.c linux-2.6.31.7/drivers/oprofile/oprofilefs.c
+--- linux-2.6.31.7/drivers/oprofile/oprofilefs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/oprofile/oprofilefs.c 2009-12-08 17:39:43.866763444 -0500
+@@ -35,7 +35,7 @@ static struct inode *oprofilefs_get_inod
+ }
+
+
+-static struct super_operations s_ops = {
++static const struct super_operations s_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+ };
+@@ -187,7 +187,7 @@ static const struct file_operations atom
+
+
+ int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+- char const *name, atomic_t *val)
++ char const *name, atomic_unchecked_t *val)
+ {
+ struct dentry *d = __oprofilefs_create_file(sb, root, name,
+ &atomic_ro_fops, 0444);
+diff -urNp linux-2.6.31.7/drivers/oprofile/oprofile_stats.c linux-2.6.31.7/drivers/oprofile/oprofile_stats.c
+--- linux-2.6.31.7/drivers/oprofile/oprofile_stats.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/oprofile/oprofile_stats.c 2009-12-08 17:39:43.866763444 -0500
+@@ -30,10 +30,10 @@ void oprofile_reset_stats(void)
+ cpu_buf->sample_invalid_eip = 0;
+ }
+
+- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
+- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
+- atomic_set(&oprofile_stats.event_lost_overflow, 0);
+- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
++ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
++ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
+ }
+
+
+diff -urNp linux-2.6.31.7/drivers/oprofile/oprofile_stats.h linux-2.6.31.7/drivers/oprofile/oprofile_stats.h
+--- linux-2.6.31.7/drivers/oprofile/oprofile_stats.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/oprofile/oprofile_stats.h 2009-12-08 17:39:43.866763444 -0500
+@@ -13,10 +13,10 @@
+ #include <asm/atomic.h>
+
+ struct oprofile_stat_struct {
+- atomic_t sample_lost_no_mm;
+- atomic_t sample_lost_no_mapping;
+- atomic_t bt_lost_no_mapping;
+- atomic_t event_lost_overflow;
++ atomic_unchecked_t sample_lost_no_mm;
++ atomic_unchecked_t sample_lost_no_mapping;
++ atomic_unchecked_t bt_lost_no_mapping;
++ atomic_unchecked_t event_lost_overflow;
+ };
+
+ extern struct oprofile_stat_struct oprofile_stats;
+diff -urNp linux-2.6.31.7/drivers/parisc/pdc_stable.c linux-2.6.31.7/drivers/parisc/pdc_stable.c
+--- linux-2.6.31.7/drivers/parisc/pdc_stable.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/parisc/pdc_stable.c 2009-12-08 17:39:43.867666014 -0500
+@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
+ return ret;
+ }
+
+-static struct sysfs_ops pdcspath_attr_ops = {
++static const struct sysfs_ops pdcspath_attr_ops = {
+ .show = pdcspath_attr_show,
+ .store = pdcspath_attr_store,
+ };
+diff -urNp linux-2.6.31.7/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.31.7/drivers/pci/hotplug/acpiphp_glue.c
+--- linux-2.6.31.7/drivers/pci/hotplug/acpiphp_glue.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/pci/hotplug/acpiphp_glue.c 2009-12-08 17:39:43.877669164 -0500
+@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
+ }
+
+
+-static struct acpi_dock_ops acpiphp_dock_ops = {
++static const struct acpi_dock_ops acpiphp_dock_ops = {
+ .handler = handle_hotplug_event_func,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.31.7/drivers/pci/hotplug/cpqphp_nvram.c
+--- linux-2.6.31.7/drivers/pci/hotplug/cpqphp_nvram.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/pci/hotplug/cpqphp_nvram.c 2009-12-08 17:39:43.889890643 -0500
+@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
+
+ void compaq_nvram_init (void __iomem *rom_start)
+ {
++
++#ifndef CONFIG_PAX_KERNEXEC
+ if (rom_start) {
+ compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
+ }
++#endif
++
+ dbg("int15 entry = %p\n", compaq_int15_entry_point);
+
+ /* initialize our int15 lock */
+diff -urNp linux-2.6.31.7/drivers/pci/hotplug/fakephp.c linux-2.6.31.7/drivers/pci/hotplug/fakephp.c
+--- linux-2.6.31.7/drivers/pci/hotplug/fakephp.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/pci/hotplug/fakephp.c 2009-12-08 17:39:43.889890643 -0500
+@@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
+ }
+
+ static struct kobj_type legacy_ktype = {
+- .sysfs_ops = &(struct sysfs_ops){
++ .sysfs_ops = &(const struct sysfs_ops){
+ .store = legacy_store, .show = legacy_show
+ },
+ .release = &legacy_release,
+diff -urNp linux-2.6.31.7/drivers/pci/pcie/portdrv_pci.c linux-2.6.31.7/drivers/pci/pcie/portdrv_pci.c
+--- linux-2.6.31.7/drivers/pci/pcie/portdrv_pci.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/pci/pcie/portdrv_pci.c 2009-12-08 17:39:43.890712298 -0500
+@@ -249,7 +249,7 @@ static void pcie_portdrv_err_resume(stru
+ static const struct pci_device_id port_pci_ids[] = { {
+ /* handle any PCI-Express port */
+ PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0),
+- }, { /* end: all zeroes */ }
++ }, { 0, 0, 0, 0, 0, 0, 0 }
+ };
+ MODULE_DEVICE_TABLE(pci, port_pci_ids);
+
+diff -urNp linux-2.6.31.7/drivers/pci/proc.c linux-2.6.31.7/drivers/pci/proc.c
+--- linux-2.6.31.7/drivers/pci/proc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/pci/proc.c 2009-12-08 17:39:43.890712298 -0500
+@@ -480,7 +480,16 @@ static const struct file_operations proc
+ static int __init pci_proc_init(void)
+ {
+ struct pci_dev *dev = NULL;
++
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
++#endif
++#else
+ proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
++#endif
+ proc_create("devices", 0, proc_bus_pci_dir,
+ &proc_bus_pci_dev_operations);
+ proc_initialized = 1;
+diff -urNp linux-2.6.31.7/drivers/pci/slot.c linux-2.6.31.7/drivers/pci/slot.c
+--- linux-2.6.31.7/drivers/pci/slot.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/pci/slot.c 2009-12-08 17:39:43.891668355 -0500
+@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
+ return attribute->store ? attribute->store(slot, buf, len) : -EIO;
+ }
+
+-static struct sysfs_ops pci_slot_sysfs_ops = {
++static const struct sysfs_ops pci_slot_sysfs_ops = {
+ .show = pci_slot_attr_show,
+ .store = pci_slot_attr_store,
+ };
+diff -urNp linux-2.6.31.7/drivers/pcmcia/ti113x.h linux-2.6.31.7/drivers/pcmcia/ti113x.h
+--- linux-2.6.31.7/drivers/pcmcia/ti113x.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/pcmcia/ti113x.h 2009-12-08 17:39:43.892766035 -0500
+@@ -903,7 +903,7 @@ static struct pci_device_id ene_tune_tbl
+ DEVID(PCI_VENDOR_ID_MOTOROLA, 0x3410, 0xECC0, PCI_ANY_ID,
+ ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE),
+
+- {}
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus)
+diff -urNp linux-2.6.31.7/drivers/pcmcia/yenta_socket.c linux-2.6.31.7/drivers/pcmcia/yenta_socket.c
+--- linux-2.6.31.7/drivers/pcmcia/yenta_socket.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/pcmcia/yenta_socket.c 2009-12-08 17:39:43.892766035 -0500
+@@ -1387,7 +1387,7 @@ static struct pci_device_id yenta_table
+
+ /* match any cardbus bridge */
+ CB_ID(PCI_ANY_ID, PCI_ANY_ID, DEFAULT),
+- { /* all zeroes */ }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+ MODULE_DEVICE_TABLE(pci, yenta_table);
+
+diff -urNp linux-2.6.31.7/drivers/platform/x86/acer-wmi.c linux-2.6.31.7/drivers/platform/x86/acer-wmi.c
+--- linux-2.6.31.7/drivers/platform/x86/acer-wmi.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/platform/x86/acer-wmi.c 2009-12-08 17:39:43.903695736 -0500
+@@ -916,7 +916,7 @@ static int update_bl_status(struct backl
+ return 0;
+ }
+
+-static struct backlight_ops acer_bl_ops = {
++static const struct backlight_ops acer_bl_ops = {
+ .get_brightness = read_brightness,
+ .update_status = update_bl_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/platform/x86/asus_acpi.c linux-2.6.31.7/drivers/platform/x86/asus_acpi.c
+--- linux-2.6.31.7/drivers/platform/x86/asus_acpi.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/platform/x86/asus_acpi.c 2009-12-08 17:39:43.904558205 -0500
+@@ -1402,7 +1402,7 @@ static int asus_hotk_remove(struct acpi_
+ return 0;
+ }
+
+-static struct backlight_ops asus_backlight_data = {
++static const struct backlight_ops asus_backlight_data = {
+ .get_brightness = read_brightness,
+ .update_status = set_brightness_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/platform/x86/asus-laptop.c linux-2.6.31.7/drivers/platform/x86/asus-laptop.c
+--- linux-2.6.31.7/drivers/platform/x86/asus-laptop.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/platform/x86/asus-laptop.c 2009-12-08 17:39:43.903695736 -0500
+@@ -232,7 +232,7 @@ static struct backlight_device *asus_bac
+ */
+ static int read_brightness(struct backlight_device *bd);
+ static int update_bl_status(struct backlight_device *bd);
+-static struct backlight_ops asusbl_ops = {
++static const struct backlight_ops asusbl_ops = {
+ .get_brightness = read_brightness,
+ .update_status = update_bl_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/platform/x86/compal-laptop.c linux-2.6.31.7/drivers/platform/x86/compal-laptop.c
+--- linux-2.6.31.7/drivers/platform/x86/compal-laptop.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/platform/x86/compal-laptop.c 2009-12-08 17:39:43.904558205 -0500
+@@ -163,7 +163,7 @@ static int bl_update_status(struct backl
+ return set_lcd_level(b->props.brightness);
+ }
+
+-static struct backlight_ops compalbl_ops = {
++static const struct backlight_ops compalbl_ops = {
+ .get_brightness = bl_get_brightness,
+ .update_status = bl_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/platform/x86/dell-laptop.c linux-2.6.31.7/drivers/platform/x86/dell-laptop.c
+--- linux-2.6.31.7/drivers/platform/x86/dell-laptop.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/platform/x86/dell-laptop.c 2009-12-08 17:39:43.905777581 -0500
+@@ -305,7 +305,7 @@ static int dell_get_intensity(struct bac
+ return buffer.output[1];
+ }
+
+-static struct backlight_ops dell_ops = {
++static const struct backlight_ops dell_ops = {
+ .get_brightness = dell_get_intensity,
+ .update_status = dell_send_intensity,
+ };
+diff -urNp linux-2.6.31.7/drivers/platform/x86/eeepc-laptop.c linux-2.6.31.7/drivers/platform/x86/eeepc-laptop.c
+--- linux-2.6.31.7/drivers/platform/x86/eeepc-laptop.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/platform/x86/eeepc-laptop.c 2009-12-08 17:39:43.905777581 -0500
+@@ -234,7 +234,7 @@ static struct device *eeepc_hwmon_device
+ */
+ static int read_brightness(struct backlight_device *bd);
+ static int update_bl_status(struct backlight_device *bd);
+-static struct backlight_ops eeepcbl_ops = {
++static const struct backlight_ops eeepcbl_ops = {
+ .get_brightness = read_brightness,
+ .update_status = update_bl_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/platform/x86/fujitsu-laptop.c linux-2.6.31.7/drivers/platform/x86/fujitsu-laptop.c
+--- linux-2.6.31.7/drivers/platform/x86/fujitsu-laptop.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/platform/x86/fujitsu-laptop.c 2009-12-08 17:39:43.906780094 -0500
+@@ -442,7 +442,7 @@ static int bl_update_status(struct backl
+ return ret;
+ }
+
+-static struct backlight_ops fujitsubl_ops = {
++static const struct backlight_ops fujitsubl_ops = {
+ .get_brightness = bl_get_brightness,
+ .update_status = bl_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/platform/x86/msi-laptop.c linux-2.6.31.7/drivers/platform/x86/msi-laptop.c
+--- linux-2.6.31.7/drivers/platform/x86/msi-laptop.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/platform/x86/msi-laptop.c 2009-12-08 17:39:43.906780094 -0500
+@@ -161,7 +161,7 @@ static int bl_update_status(struct backl
+ return set_lcd_level(b->props.brightness);
+ }
+
+-static struct backlight_ops msibl_ops = {
++static const struct backlight_ops msibl_ops = {
+ .get_brightness = bl_get_brightness,
+ .update_status = bl_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/platform/x86/panasonic-laptop.c linux-2.6.31.7/drivers/platform/x86/panasonic-laptop.c
+--- linux-2.6.31.7/drivers/platform/x86/panasonic-laptop.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/platform/x86/panasonic-laptop.c 2009-12-08 17:39:43.906780094 -0500
+@@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
+ return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
+ }
+
+-static struct backlight_ops pcc_backlight_ops = {
++static const struct backlight_ops pcc_backlight_ops = {
+ .get_brightness = bl_get,
+ .update_status = bl_set_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/platform/x86/sony-laptop.c linux-2.6.31.7/drivers/platform/x86/sony-laptop.c
+--- linux-2.6.31.7/drivers/platform/x86/sony-laptop.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/platform/x86/sony-laptop.c 2009-12-08 17:39:43.914581478 -0500
+@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
+ }
+
+ static struct backlight_device *sony_backlight_device;
+-static struct backlight_ops sony_backlight_ops = {
++static const struct backlight_ops sony_backlight_ops = {
+ .update_status = sony_backlight_update_status,
+ .get_brightness = sony_backlight_get_brightness,
+ };
+diff -urNp linux-2.6.31.7/drivers/platform/x86/thinkpad_acpi.c linux-2.6.31.7/drivers/platform/x86/thinkpad_acpi.c
+--- linux-2.6.31.7/drivers/platform/x86/thinkpad_acpi.c 2009-12-08 17:29:51.615737076 -0500
++++ linux-2.6.31.7/drivers/platform/x86/thinkpad_acpi.c 2009-12-08 17:39:43.935774597 -0500
+@@ -5635,7 +5635,7 @@ static int brightness_get(struct backlig
+ return status & TP_EC_BACKLIGHT_LVLMSK;
+ }
+
+-static struct backlight_ops ibm_backlight_data = {
++static const struct backlight_ops ibm_backlight_data = {
+ .get_brightness = brightness_get,
+ .update_status = brightness_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/platform/x86/toshiba_acpi.c linux-2.6.31.7/drivers/platform/x86/toshiba_acpi.c
+--- linux-2.6.31.7/drivers/platform/x86/toshiba_acpi.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/platform/x86/toshiba_acpi.c 2009-12-08 17:39:43.936779244 -0500
+@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
+ return AE_OK;
+ }
+
+-static struct backlight_ops toshiba_backlight_data = {
++static const struct backlight_ops toshiba_backlight_data = {
+ .get_brightness = get_lcd,
+ .update_status = set_lcd_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/pnp/pnpbios/bioscalls.c linux-2.6.31.7/drivers/pnp/pnpbios/bioscalls.c
+--- linux-2.6.31.7/drivers/pnp/pnpbios/bioscalls.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/pnp/pnpbios/bioscalls.c 2009-12-08 17:39:43.936779244 -0500
+@@ -60,7 +60,7 @@ set_base(gdt[(selname) >> 3], (u32)(addr
+ set_limit(gdt[(selname) >> 3], size); \
+ } while(0)
+
+-static struct desc_struct bad_bios_desc;
++static struct desc_struct bad_bios_desc __read_only;
+
+ /*
+ * At some point we want to use this stack frame pointer to unwind
+@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
+
+ cpu = get_cpu();
+ save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
++
++ pax_open_kernel();
+ get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ /* On some boxes IRQ's during PnP BIOS calls are deadly. */
+ spin_lock_irqsave(&pnp_bios_lock, flags);
+@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
+ :"memory");
+ spin_unlock_irqrestore(&pnp_bios_lock, flags);
+
++ pax_open_kernel();
+ get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+
+ /* If we get here and this is set then the PnP BIOS faulted on us. */
+@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
+ return status;
+ }
+
+-void pnpbios_calls_init(union pnp_bios_install_struct *header)
++void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
+ {
+ int i;
+
+@@ -476,8 +482,10 @@ void pnpbios_calls_init(union pnp_bios_i
+ pnp_bios_callpoint.offset = header->fields.pm16offset;
+ pnp_bios_callpoint.segment = PNP_CS16;
+
++ pax_open_kernel();
++
+ bad_bios_desc.a = 0;
+- bad_bios_desc.b = 0x00409200;
++ bad_bios_desc.b = 0x00409300;
+
+ set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
+ _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
+@@ -491,4 +499,6 @@ void pnpbios_calls_init(union pnp_bios_i
+ set_base(gdt[GDT_ENTRY_PNPBIOS_DS],
+ __va(header->fields.pm16dseg));
+ }
++
++ pax_close_kernel();
+ }
+diff -urNp linux-2.6.31.7/drivers/pnp/quirks.c linux-2.6.31.7/drivers/pnp/quirks.c
+--- linux-2.6.31.7/drivers/pnp/quirks.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/pnp/quirks.c 2009-12-08 17:39:43.936779244 -0500
+@@ -327,7 +327,7 @@ static struct pnp_fixup pnp_fixups[] = {
+ /* PnP resources that might overlap PCI BARs */
+ {"PNP0c01", quirk_system_pci_resources},
+ {"PNP0c02", quirk_system_pci_resources},
+- {""}
++ {"", NULL}
+ };
+
+ void pnp_fixup_device(struct pnp_dev *dev)
+diff -urNp linux-2.6.31.7/drivers/pnp/resource.c linux-2.6.31.7/drivers/pnp/resource.c
+--- linux-2.6.31.7/drivers/pnp/resource.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/pnp/resource.c 2009-12-08 17:39:43.936779244 -0500
+@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
+ return 1;
+
+ /* check if the resource is valid */
+- if (*irq < 0 || *irq > 15)
++ if (*irq > 15)
+ return 0;
+
+ /* check if the resource is reserved */
+@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
+ return 1;
+
+ /* check if the resource is valid */
+- if (*dma < 0 || *dma == 4 || *dma > 7)
++ if (*dma == 4 || *dma > 7)
+ return 0;
+
+ /* check if the resource is reserved */
+diff -urNp linux-2.6.31.7/drivers/s390/cio/qdio_debug.c linux-2.6.31.7/drivers/s390/cio/qdio_debug.c
+--- linux-2.6.31.7/drivers/s390/cio/qdio_debug.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/s390/cio/qdio_debug.c 2009-12-08 17:39:43.937783785 -0500
+@@ -144,7 +144,7 @@ static void remove_debugfs_entry(struct
+ }
+ }
+
+-static struct file_operations debugfs_fops = {
++static const struct file_operations debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = qstat_seq_open,
+ .read = seq_read,
+diff -urNp linux-2.6.31.7/drivers/s390/cio/qdio_perf.c linux-2.6.31.7/drivers/s390/cio/qdio_perf.c
+--- linux-2.6.31.7/drivers/s390/cio/qdio_perf.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/s390/cio/qdio_perf.c 2009-12-08 17:39:43.937783785 -0500
+@@ -84,7 +84,7 @@ static int qdio_perf_seq_open(struct ino
+ return single_open(filp, qdio_perf_proc_show, NULL);
+ }
+
+-static struct file_operations qdio_perf_proc_fops = {
++static const struct file_operations qdio_perf_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = qdio_perf_seq_open,
+ .read = seq_read,
+diff -urNp linux-2.6.31.7/drivers/scsi/ipr.c linux-2.6.31.7/drivers/scsi/ipr.c
+--- linux-2.6.31.7/drivers/scsi/ipr.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/scsi/ipr.c 2009-12-08 17:39:43.939767576 -0500
+@@ -5262,7 +5262,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
+ return true;
+ }
+
+-static struct ata_port_operations ipr_sata_ops = {
++static const struct ata_port_operations ipr_sata_ops = {
+ .phy_reset = ipr_ata_phy_reset,
+ .hardreset = ipr_sata_reset,
+ .post_internal_cmd = ipr_ata_post_internal,
+diff -urNp linux-2.6.31.7/drivers/scsi/libfc/fc_exch.c linux-2.6.31.7/drivers/scsi/libfc/fc_exch.c
+--- linux-2.6.31.7/drivers/scsi/libfc/fc_exch.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/scsi/libfc/fc_exch.c 2009-12-08 17:39:43.939767576 -0500
+@@ -73,12 +73,12 @@ struct fc_exch_mgr {
+ * all together if not used XXX
+ */
+ struct {
+- atomic_t no_free_exch;
+- atomic_t no_free_exch_xid;
+- atomic_t xid_not_found;
+- atomic_t xid_busy;
+- atomic_t seq_not_found;
+- atomic_t non_bls_resp;
++ atomic_unchecked_t no_free_exch;
++ atomic_unchecked_t no_free_exch_xid;
++ atomic_unchecked_t xid_not_found;
++ atomic_unchecked_t xid_busy;
++ atomic_unchecked_t seq_not_found;
++ atomic_unchecked_t non_bls_resp;
+ } stats;
+ struct fc_exch **exches; /* for exch pointers indexed by xid */
+ };
+@@ -523,7 +523,7 @@ struct fc_exch *fc_exch_alloc(struct fc_
+ /* allocate memory for exchange */
+ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
+ if (!ep) {
+- atomic_inc(&mp->stats.no_free_exch);
++ atomic_inc_unchecked(&mp->stats.no_free_exch);
+ goto out;
+ }
+ memset(ep, 0, sizeof(*ep));
+@@ -568,7 +568,7 @@ out:
+ return ep;
+ err:
+ spin_unlock_bh(&mp->em_lock);
+- atomic_inc(&mp->stats.no_free_exch_xid);
++ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
+ mempool_free(ep, mp->ep_pool);
+ return NULL;
+ }
+@@ -671,7 +671,7 @@ static enum fc_pf_rjt_reason fc_seq_look
+ xid = ntohs(fh->fh_ox_id); /* we originated exch */
+ ep = fc_exch_find(mp, xid);
+ if (!ep) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ reject = FC_RJT_OX_ID;
+ goto out;
+ }
+@@ -701,7 +701,7 @@ static enum fc_pf_rjt_reason fc_seq_look
+ ep = fc_exch_find(mp, xid);
+ if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
+ if (ep) {
+- atomic_inc(&mp->stats.xid_busy);
++ atomic_inc_unchecked(&mp->stats.xid_busy);
+ reject = FC_RJT_RX_ID;
+ goto rel;
+ }
+@@ -712,7 +712,7 @@ static enum fc_pf_rjt_reason fc_seq_look
+ }
+ xid = ep->xid; /* get our XID */
+ } else if (!ep) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ reject = FC_RJT_RX_ID; /* XID not found */
+ goto out;
+ }
+@@ -733,7 +733,7 @@ static enum fc_pf_rjt_reason fc_seq_look
+ } else {
+ sp = &ep->seq;
+ if (sp->id != fh->fh_seq_id) {
+- atomic_inc(&mp->stats.seq_not_found);
++ atomic_inc_unchecked(&mp->stats.seq_not_found);
+ reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
+ goto rel;
+ }
+@@ -1145,22 +1145,22 @@ static void fc_exch_recv_seq_resp(struct
+
+ ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
+ if (!ep) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto out;
+ }
+ if (ep->esb_stat & ESB_ST_COMPLETE) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto out;
+ }
+ if (ep->rxid == FC_XID_UNKNOWN)
+ ep->rxid = ntohs(fh->fh_rx_id);
+ if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->did != ntoh24(fh->fh_s_id) &&
+ ep->did != FC_FID_FLOGI) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ sof = fr_sof(fp);
+@@ -1171,7 +1171,7 @@ static void fc_exch_recv_seq_resp(struct
+ } else {
+ sp = &ep->seq;
+ if (sp->id != fh->fh_seq_id) {
+- atomic_inc(&mp->stats.seq_not_found);
++ atomic_inc_unchecked(&mp->stats.seq_not_found);
+ goto rel;
+ }
+ }
+@@ -1230,10 +1230,10 @@ static void fc_exch_recv_resp(struct fc_
+
+ sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
+ if (!sp) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ FC_EM_DBG(mp, "seq lookup failed\n");
+ } else {
+- atomic_inc(&mp->stats.non_bls_resp);
++ atomic_inc_unchecked(&mp->stats.non_bls_resp);
+ FC_EM_DBG(mp, "non-BLS response to sequence");
+ }
+ fc_frame_free(fp);
+diff -urNp linux-2.6.31.7/drivers/scsi/libsas/sas_ata.c linux-2.6.31.7/drivers/scsi/libsas/sas_ata.c
+--- linux-2.6.31.7/drivers/scsi/libsas/sas_ata.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/scsi/libsas/sas_ata.c 2009-12-08 17:39:43.941268526 -0500
+@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
+ }
+ }
+
+-static struct ata_port_operations sas_sata_ops = {
++static const struct ata_port_operations sas_sata_ops = {
+ .phy_reset = sas_ata_phy_reset,
+ .post_internal_cmd = sas_ata_post_internal,
+ .qc_prep = ata_noop_qc_prep,
+diff -urNp linux-2.6.31.7/drivers/scsi/scsi_logging.h linux-2.6.31.7/drivers/scsi/scsi_logging.h
+--- linux-2.6.31.7/drivers/scsi/scsi_logging.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/scsi/scsi_logging.h 2009-12-08 17:39:43.941268526 -0500
+@@ -51,7 +51,7 @@ do { \
+ } while (0); \
+ } while (0)
+ #else
+-#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD)
++#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do {} while (0)
+ #endif /* CONFIG_SCSI_LOGGING */
+
+ /*
+diff -urNp linux-2.6.31.7/drivers/scsi/sg.c linux-2.6.31.7/drivers/scsi/sg.c
+--- linux-2.6.31.7/drivers/scsi/sg.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/scsi/sg.c 2009-12-08 17:39:43.942771763 -0500
+@@ -1185,7 +1185,7 @@ sg_vma_fault(struct vm_area_struct *vma,
+ return VM_FAULT_SIGBUS;
+ }
+
+-static struct vm_operations_struct sg_mmap_vm_ops = {
++static const struct vm_operations_struct sg_mmap_vm_ops = {
+ .fault = sg_vma_fault,
+ };
+
+@@ -1317,7 +1317,7 @@ static void sg_rq_end_io(struct request
+ }
+ }
+
+-static struct file_operations sg_fops = {
++static const struct file_operations sg_fops = {
+ .owner = THIS_MODULE,
+ .read = sg_read,
+ .write = sg_write,
+@@ -2194,8 +2194,11 @@ static int sg_proc_seq_show_int(struct s
+ static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
+ static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *off);
+-static struct file_operations adio_fops = {
+- /* .owner, .read and .llseek added in sg_proc_init() */
++
++static const struct file_operations adio_fops = {
++ .owner = THIS_MODULE,
++ .read = seq_read,
++ .llseek = seq_lseek,
+ .open = sg_proc_single_open_adio,
+ .write = sg_proc_write_adio,
+ .release = single_release,
+@@ -2204,7 +2207,10 @@ static struct file_operations adio_fops
+ static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
+ static ssize_t sg_proc_write_dressz(struct file *filp,
+ const char __user *buffer, size_t count, loff_t *off);
+-static struct file_operations dressz_fops = {
++static const struct file_operations dressz_fops = {
++ .owner = THIS_MODULE,
++ .read = seq_read,
++ .llseek = seq_lseek,
+ .open = sg_proc_single_open_dressz,
+ .write = sg_proc_write_dressz,
+ .release = single_release,
+@@ -2212,14 +2218,20 @@ static struct file_operations dressz_fop
+
+ static int sg_proc_seq_show_version(struct seq_file *s, void *v);
+ static int sg_proc_single_open_version(struct inode *inode, struct file *file);
+-static struct file_operations version_fops = {
++static const struct file_operations version_fops = {
++ .owner = THIS_MODULE,
++ .read = seq_read,
++ .llseek = seq_lseek,
+ .open = sg_proc_single_open_version,
+ .release = single_release,
+ };
+
+ static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
+ static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
+-static struct file_operations devhdr_fops = {
++static const struct file_operations devhdr_fops = {
++ .owner = THIS_MODULE,
++ .read = seq_read,
++ .llseek = seq_lseek,
+ .open = sg_proc_single_open_devhdr,
+ .release = single_release,
+ };
+@@ -2229,11 +2241,14 @@ static int sg_proc_open_dev(struct inode
+ static void * dev_seq_start(struct seq_file *s, loff_t *pos);
+ static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
+ static void dev_seq_stop(struct seq_file *s, void *v);
+-static struct file_operations dev_fops = {
++static const struct file_operations dev_fops = {
++ .owner = THIS_MODULE,
++ .read = seq_read,
++ .llseek = seq_lseek,
+ .open = sg_proc_open_dev,
+ .release = seq_release,
+ };
+-static struct seq_operations dev_seq_ops = {
++static const struct seq_operations dev_seq_ops = {
+ .start = dev_seq_start,
+ .next = dev_seq_next,
+ .stop = dev_seq_stop,
+@@ -2242,11 +2257,14 @@ static struct seq_operations dev_seq_ops
+
+ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
+ static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
+-static struct file_operations devstrs_fops = {
++static const struct file_operations devstrs_fops = {
++ .owner = THIS_MODULE,
++ .read = seq_read,
++ .llseek = seq_lseek,
+ .open = sg_proc_open_devstrs,
+ .release = seq_release,
+ };
+-static struct seq_operations devstrs_seq_ops = {
++static const struct seq_operations devstrs_seq_ops = {
+ .start = dev_seq_start,
+ .next = dev_seq_next,
+ .stop = dev_seq_stop,
+@@ -2255,11 +2273,14 @@ static struct seq_operations devstrs_seq
+
+ static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
+ static int sg_proc_open_debug(struct inode *inode, struct file *file);
+-static struct file_operations debug_fops = {
++static const struct file_operations debug_fops = {
++ .owner = THIS_MODULE,
++ .read = seq_read,
++ .llseek = seq_lseek,
+ .open = sg_proc_open_debug,
+ .release = seq_release,
+ };
+-static struct seq_operations debug_seq_ops = {
++static const struct seq_operations debug_seq_ops = {
+ .start = dev_seq_start,
+ .next = dev_seq_next,
+ .stop = dev_seq_stop,
+@@ -2269,7 +2290,7 @@ static struct seq_operations debug_seq_o
+
+ struct sg_proc_leaf {
+ const char * name;
+- struct file_operations * fops;
++ const struct file_operations * fops;
+ };
+
+ static struct sg_proc_leaf sg_proc_leaf_arr[] = {
+@@ -2295,9 +2316,6 @@ sg_proc_init(void)
+ for (k = 0; k < num_leaves; ++k) {
+ leaf = &sg_proc_leaf_arr[k];
+ mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
+- leaf->fops->owner = THIS_MODULE;
+- leaf->fops->read = seq_read;
+- leaf->fops->llseek = seq_lseek;
+ proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
+ }
+ return 0;
+diff -urNp linux-2.6.31.7/drivers/serial/8250_pci.c linux-2.6.31.7/drivers/serial/8250_pci.c
+--- linux-2.6.31.7/drivers/serial/8250_pci.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/serial/8250_pci.c 2009-12-08 17:39:43.950655984 -0500
+@@ -3591,7 +3591,7 @@ static struct pci_device_id serial_pci_t
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8,
+ 0xffff00, pbn_default },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ static struct pci_driver serial_pci_driver = {
+diff -urNp linux-2.6.31.7/drivers/serial/kgdboc.c linux-2.6.31.7/drivers/serial/kgdboc.c
+--- linux-2.6.31.7/drivers/serial/kgdboc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/serial/kgdboc.c 2009-12-08 17:39:43.950655984 -0500
+@@ -18,7 +18,7 @@
+
+ #define MAX_CONFIG_LEN 40
+
+-static struct kgdb_io kgdboc_io_ops;
++static const struct kgdb_io kgdboc_io_ops;
+
+ /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
+ static int configured = -1;
+@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
+ module_put(THIS_MODULE);
+ }
+
+-static struct kgdb_io kgdboc_io_ops = {
++static const struct kgdb_io kgdboc_io_ops = {
+ .name = "kgdboc",
+ .read_char = kgdboc_get_char,
+ .write_char = kgdboc_put_char,
+diff -urNp linux-2.6.31.7/drivers/spi/spidev.c linux-2.6.31.7/drivers/spi/spidev.c
+--- linux-2.6.31.7/drivers/spi/spidev.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/spi/spidev.c 2009-12-08 17:39:43.950655984 -0500
+@@ -537,7 +537,7 @@ static int spidev_release(struct inode *
+ return status;
+ }
+
+-static struct file_operations spidev_fops = {
++static const struct file_operations spidev_fops = {
+ .owner = THIS_MODULE,
+ /* REVISIT switch to aio primitives, so that userspace
+ * gets more complete API coverage. It'll simplify things
+diff -urNp linux-2.6.31.7/drivers/staging/android/binder.c linux-2.6.31.7/drivers/staging/android/binder.c
+--- linux-2.6.31.7/drivers/staging/android/binder.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/staging/android/binder.c 2009-12-08 17:39:43.951685227 -0500
+@@ -2717,7 +2717,7 @@ static void binder_vma_close(struct vm_a
+ binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
+ }
+
+-static struct vm_operations_struct binder_vm_ops = {
++static const struct vm_operations_struct binder_vm_ops = {
+ .open = binder_vma_open,
+ .close = binder_vma_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/staging/b3dfg/b3dfg.c linux-2.6.31.7/drivers/staging/b3dfg/b3dfg.c
+--- linux-2.6.31.7/drivers/staging/b3dfg/b3dfg.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/staging/b3dfg/b3dfg.c 2009-12-08 17:39:43.952782339 -0500
+@@ -454,7 +454,7 @@ static int b3dfg_vma_fault(struct vm_are
+ return VM_FAULT_NOPAGE;
+ }
+
+-static struct vm_operations_struct b3dfg_vm_ops = {
++static const struct vm_operations_struct b3dfg_vm_ops = {
+ .fault = b3dfg_vma_fault,
+ };
+
+@@ -854,7 +854,7 @@ static int b3dfg_mmap(struct file *filp,
+ return r;
+ }
+
+-static struct file_operations b3dfg_fops = {
++static const struct file_operations b3dfg_fops = {
+ .owner = THIS_MODULE,
+ .open = b3dfg_open,
+ .release = b3dfg_release,
+diff -urNp linux-2.6.31.7/drivers/staging/comedi/comedi_fops.c linux-2.6.31.7/drivers/staging/comedi/comedi_fops.c
+--- linux-2.6.31.7/drivers/staging/comedi/comedi_fops.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/staging/comedi/comedi_fops.c 2009-12-08 17:39:43.952782339 -0500
+@@ -1370,7 +1370,7 @@ void comedi_unmap(struct vm_area_struct
+ mutex_unlock(&dev->mutex);
+ }
+
+-static struct vm_operations_struct comedi_vm_ops = {
++static const struct vm_operations_struct comedi_vm_ops = {
+ .close = comedi_unmap,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/staging/cpc-usb/cpc-usb_drv.c linux-2.6.31.7/drivers/staging/cpc-usb/cpc-usb_drv.c
+--- linux-2.6.31.7/drivers/staging/cpc-usb/cpc-usb_drv.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/staging/cpc-usb/cpc-usb_drv.c 2009-12-08 17:39:43.953771551 -0500
+@@ -104,7 +104,7 @@ static void cpcusb_read_interrupt_callba
+
+ static int cpcusb_setup_intrep(CPC_USB_T *card);
+
+-static struct file_operations cpcusb_fops = {
++static const struct file_operations cpcusb_fops = {
+ /*
+ * The owner field is part of the module-locking
+ * mechanism. The idea is that the kernel knows
+diff -urNp linux-2.6.31.7/drivers/staging/epl/EplApiLinuxKernel.c linux-2.6.31.7/drivers/staging/epl/EplApiLinuxKernel.c
+--- linux-2.6.31.7/drivers/staging/epl/EplApiLinuxKernel.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/staging/epl/EplApiLinuxKernel.c 2009-12-08 17:39:43.953771551 -0500
+@@ -203,7 +203,7 @@ static int EplLinIoctl(struct inode *pDe
+ module_init(EplLinInit);
+ module_exit(EplLinExit);
+
+-static struct file_operations EplLinFileOps_g = {
++static const struct file_operations EplLinFileOps_g = {
+ .owner = THIS_MODULE,
+ .open = EplLinOpen,
+ .release = EplLinRelease,
+diff -urNp linux-2.6.31.7/drivers/staging/go7007/go7007-v4l2.c linux-2.6.31.7/drivers/staging/go7007/go7007-v4l2.c
+--- linux-2.6.31.7/drivers/staging/go7007/go7007-v4l2.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/staging/go7007/go7007-v4l2.c 2009-12-08 17:39:43.954688739 -0500
+@@ -1717,7 +1717,7 @@ static int go7007_vm_fault(struct vm_are
+ return 0;
+ }
+
+-static struct vm_operations_struct go7007_vm_ops = {
++static const struct vm_operations_struct go7007_vm_ops = {
+ .open = go7007_vm_open,
+ .close = go7007_vm_close,
+ .fault = go7007_vm_fault,
+diff -urNp linux-2.6.31.7/drivers/staging/panel/panel.c linux-2.6.31.7/drivers/staging/panel/panel.c
+--- linux-2.6.31.7/drivers/staging/panel/panel.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/staging/panel/panel.c 2009-12-08 17:39:43.954688739 -0500
+@@ -1263,7 +1263,7 @@ static int lcd_release(struct inode *ino
+ return 0;
+ }
+
+-static struct file_operations lcd_fops = {
++static const struct file_operations lcd_fops = {
+ .write = lcd_write,
+ .open = lcd_open,
+ .release = lcd_release,
+@@ -1519,7 +1519,7 @@ static int keypad_release(struct inode *
+ return 0;
+ }
+
+-static struct file_operations keypad_fops = {
++static const struct file_operations keypad_fops = {
+ .read = keypad_read, /* read */
+ .open = keypad_open, /* open */
+ .release = keypad_release, /* close */
+diff -urNp linux-2.6.31.7/drivers/staging/pata_rdc/pata_rdc.c linux-2.6.31.7/drivers/staging/pata_rdc/pata_rdc.c
+--- linux-2.6.31.7/drivers/staging/pata_rdc/pata_rdc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/staging/pata_rdc/pata_rdc.c 2009-12-08 17:39:43.955773097 -0500
+@@ -875,7 +875,7 @@ static struct scsi_host_template rdc_pat
+ ATA_BMDMA_SHT(KBUILD_MODNAME),
+ };
+
+-static struct ata_port_operations rdc_pata_ops = {
++static const struct ata_port_operations rdc_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .port_start = rdc_pata_port_start,
+diff -urNp linux-2.6.31.7/drivers/staging/phison/phison.c linux-2.6.31.7/drivers/staging/phison/phison.c
+--- linux-2.6.31.7/drivers/staging/phison/phison.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/staging/phison/phison.c 2009-12-08 17:39:43.955773097 -0500
+@@ -43,7 +43,7 @@ static struct scsi_host_template phison_
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+-static struct ata_port_operations phison_ops = {
++static const struct ata_port_operations phison_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .prereset = phison_pre_reset,
+ };
+diff -urNp linux-2.6.31.7/drivers/staging/poch/poch.c linux-2.6.31.7/drivers/staging/poch/poch.c
+--- linux-2.6.31.7/drivers/staging/poch/poch.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/staging/poch/poch.c 2009-12-08 17:39:43.955773097 -0500
+@@ -1056,7 +1056,7 @@ static int poch_ioctl(struct inode *inod
+ return 0;
+ }
+
+-static struct file_operations poch_fops = {
++static const struct file_operations poch_fops = {
+ .owner = THIS_MODULE,
+ .open = poch_open,
+ .release = poch_release,
+diff -urNp linux-2.6.31.7/drivers/staging/rtl8192su/ieee80211/proc.c linux-2.6.31.7/drivers/staging/rtl8192su/ieee80211/proc.c
+--- linux-2.6.31.7/drivers/staging/rtl8192su/ieee80211/proc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/staging/rtl8192su/ieee80211/proc.c 2009-12-08 17:39:43.956775134 -0500
+@@ -87,7 +87,7 @@ static int c_show(struct seq_file *m, vo
+ return 0;
+ }
+
+-static struct seq_operations crypto_seq_ops = {
++static const struct seq_operations crypto_seq_ops = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+@@ -99,7 +99,7 @@ static int crypto_info_open(struct inode
+ return seq_open(file, &crypto_seq_ops);
+ }
+
+-static struct file_operations proc_crypto_ops = {
++static const struct file_operations proc_crypto_ops = {
+ .open = crypto_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.31.7/drivers/uio/uio.c linux-2.6.31.7/drivers/uio/uio.c
+--- linux-2.6.31.7/drivers/uio/uio.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/uio/uio.c 2009-12-08 17:39:43.967779996 -0500
+@@ -128,7 +128,7 @@ static ssize_t map_type_show(struct kobj
+ return entry->show(mem, buf);
+ }
+
+-static struct sysfs_ops map_sysfs_ops = {
++static const struct sysfs_ops map_sysfs_ops = {
+ .show = map_type_show,
+ };
+
+@@ -216,7 +216,7 @@ static ssize_t portio_type_show(struct k
+ return entry->show(port, buf);
+ }
+
+-static struct sysfs_ops portio_sysfs_ops = {
++static const struct sysfs_ops portio_sysfs_ops = {
+ .show = portio_type_show,
+ };
+
+@@ -658,7 +658,7 @@ static int uio_vma_fault(struct vm_area_
+ return 0;
+ }
+
+-static struct vm_operations_struct uio_vm_ops = {
++static const struct vm_operations_struct uio_vm_ops = {
+ .open = uio_vma_open,
+ .close = uio_vma_close,
+ .fault = uio_vma_fault,
+diff -urNp linux-2.6.31.7/drivers/usb/atm/usbatm.c linux-2.6.31.7/drivers/usb/atm/usbatm.c
+--- linux-2.6.31.7/drivers/usb/atm/usbatm.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/atm/usbatm.c 2009-12-08 17:39:43.968680061 -0500
+@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
+ if (printk_ratelimit())
+ atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
+ __func__, vpi, vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+
+@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
+ if (length > ATM_MAX_AAL5_PDU) {
+ atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
+ __func__, length, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
+ if (sarb->len < pdu_length) {
+ atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
+ __func__, pdu_length, sarb->len, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+ if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
+ atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
+ __func__, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
+ if (printk_ratelimit())
+ atm_err(instance, "%s: no memory for skb (length: %u)!\n",
+ __func__, length);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto out;
+ }
+
+@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
+
+ vcc->push(vcc, skb);
+
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ out:
+ skb_trim(sarb, 0);
+ }
+@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
+ struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
+
+ usbatm_pop(vcc, skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ skb = skb_dequeue(&instance->sndqueue);
+ }
+@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
+ if (!left--)
+ return sprintf(page,
+ "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
+- atomic_read(&atm_dev->stats.aal5.tx),
+- atomic_read(&atm_dev->stats.aal5.tx_err),
+- atomic_read(&atm_dev->stats.aal5.rx),
+- atomic_read(&atm_dev->stats.aal5.rx_err),
+- atomic_read(&atm_dev->stats.aal5.rx_drop));
++ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
++ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
+
+ if (!left--) {
+ if (instance->disconnected)
+diff -urNp linux-2.6.31.7/drivers/usb/class/cdc-acm.c linux-2.6.31.7/drivers/usb/class/cdc-acm.c
+--- linux-2.6.31.7/drivers/usb/class/cdc-acm.c 2009-12-08 17:29:51.618713201 -0500
++++ linux-2.6.31.7/drivers/usb/class/cdc-acm.c 2009-12-08 17:39:43.968680061 -0500
+@@ -1528,7 +1528,7 @@ static struct usb_device_id acm_ids[] =
+ USB_CDC_ACM_PROTO_AT_CDMA) },
+
+ /* NOTE: COMM/ACM/0xff is likely MSFT RNDIS ... NOT a modem!! */
+- { }
++ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(usb, acm_ids);
+diff -urNp linux-2.6.31.7/drivers/usb/class/usblp.c linux-2.6.31.7/drivers/usb/class/usblp.c
+--- linux-2.6.31.7/drivers/usb/class/usblp.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/class/usblp.c 2009-12-08 17:39:43.969778844 -0500
+@@ -228,7 +228,7 @@ static const struct quirk_printer_struct
+ { 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <kernel@zut.de> */
+ { 0x04f9, 0x000d, USBLP_QUIRK_BIDIR }, /* Brother Industries, Ltd HL-1440 Laser Printer */
+ { 0x04b8, 0x0202, USBLP_QUIRK_BAD_CLASS }, /* Seiko Epson Receipt Printer M129C */
+- { 0, 0 }
++ { 0, 0, 0 }
+ };
+
+ static int usblp_wwait(struct usblp *usblp, int nonblock);
+@@ -1412,7 +1412,7 @@ static struct usb_device_id usblp_ids []
+ { USB_INTERFACE_INFO(7, 1, 2) },
+ { USB_INTERFACE_INFO(7, 1, 3) },
+ { USB_DEVICE(0x04b8, 0x0202) }, /* Seiko Epson Receipt Printer M129C */
+- { } /* Terminating entry */
++ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */
+ };
+
+ MODULE_DEVICE_TABLE (usb, usblp_ids);
+diff -urNp linux-2.6.31.7/drivers/usb/class/usbtmc.c linux-2.6.31.7/drivers/usb/class/usbtmc.c
+--- linux-2.6.31.7/drivers/usb/class/usbtmc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/class/usbtmc.c 2009-12-08 17:39:43.969778844 -0500
+@@ -970,7 +970,7 @@ static long usbtmc_ioctl(struct file *fi
+ return retval;
+ }
+
+-static struct file_operations fops = {
++static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .read = usbtmc_read,
+ .write = usbtmc_write,
+diff -urNp linux-2.6.31.7/drivers/usb/core/hcd.c linux-2.6.31.7/drivers/usb/core/hcd.c
+--- linux-2.6.31.7/drivers/usb/core/hcd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/core/hcd.c 2009-12-08 17:39:43.970778632 -0500
+@@ -2199,7 +2199,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
+
+ #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
+
+-struct usb_mon_operations *mon_ops;
++const struct usb_mon_operations *mon_ops;
+
+ /*
+ * The registration is unlocked.
+@@ -2209,7 +2209,7 @@ struct usb_mon_operations *mon_ops;
+ * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
+ */
+
+-int usb_mon_register (struct usb_mon_operations *ops)
++int usb_mon_register (const struct usb_mon_operations *ops)
+ {
+
+ if (mon_ops)
+diff -urNp linux-2.6.31.7/drivers/usb/core/hcd.h linux-2.6.31.7/drivers/usb/core/hcd.h
+--- linux-2.6.31.7/drivers/usb/core/hcd.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/core/hcd.h 2009-12-08 17:39:43.971677600 -0500
+@@ -487,7 +487,7 @@ struct usb_mon_operations {
+ /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
+ };
+
+-extern struct usb_mon_operations *mon_ops;
++extern const struct usb_mon_operations *mon_ops;
+
+ static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
+ {
+@@ -509,7 +509,7 @@ static inline void usbmon_urb_complete(s
+ (*mon_ops->urb_complete)(bus, urb, status);
+ }
+
+-int usb_mon_register(struct usb_mon_operations *ops);
++int usb_mon_register(const struct usb_mon_operations *ops);
+ void usb_mon_deregister(void);
+
+ #else
+diff -urNp linux-2.6.31.7/drivers/usb/core/hub.c linux-2.6.31.7/drivers/usb/core/hub.c
+--- linux-2.6.31.7/drivers/usb/core/hub.c 2009-12-08 17:29:51.618713201 -0500
++++ linux-2.6.31.7/drivers/usb/core/hub.c 2009-12-08 17:39:43.972777250 -0500
+@@ -3284,7 +3284,7 @@ static struct usb_device_id hub_id_table
+ .bDeviceClass = USB_CLASS_HUB},
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
+ .bInterfaceClass = USB_CLASS_HUB},
+- { } /* Terminating entry */
++ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */
+ };
+
+ MODULE_DEVICE_TABLE (usb, hub_id_table);
+diff -urNp linux-2.6.31.7/drivers/usb/core/inode.c linux-2.6.31.7/drivers/usb/core/inode.c
+--- linux-2.6.31.7/drivers/usb/core/inode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/core/inode.c 2009-12-08 17:39:43.972777250 -0500
+@@ -48,7 +48,7 @@
+ #define USBFS_DEFAULT_BUSMODE (S_IXUGO | S_IRUGO)
+ #define USBFS_DEFAULT_LISTMODE S_IRUGO
+
+-static struct super_operations usbfs_ops;
++static const struct super_operations usbfs_ops;
+ static const struct file_operations default_file_operations;
+ static struct vfsmount *usbfs_mount;
+ static int usbfs_mount_count; /* = 0 */
+@@ -449,7 +449,7 @@ static const struct file_operations defa
+ .llseek = default_file_lseek,
+ };
+
+-static struct super_operations usbfs_ops = {
++static const struct super_operations usbfs_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+ .remount_fs = remount,
+diff -urNp linux-2.6.31.7/drivers/usb/core/message.c linux-2.6.31.7/drivers/usb/core/message.c
+--- linux-2.6.31.7/drivers/usb/core/message.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/core/message.c 2009-12-08 17:39:43.973774839 -0500
+@@ -926,8 +926,8 @@ char *usb_cache_string(struct usb_device
+ buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL);
+ if (buf) {
+ len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
+- if (len > 0) {
+- smallbuf = kmalloc(++len, GFP_KERNEL);
++ if (len++ > 0) {
++ smallbuf = kmalloc(len, GFP_KERNEL);
+ if (!smallbuf)
+ return buf;
+ memcpy(smallbuf, buf, len);
+diff -urNp linux-2.6.31.7/drivers/usb/gadget/inode.c linux-2.6.31.7/drivers/usb/gadget/inode.c
+--- linux-2.6.31.7/drivers/usb/gadget/inode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/gadget/inode.c 2009-12-08 17:39:43.973774839 -0500
+@@ -2033,7 +2033,7 @@ gadgetfs_create_file (struct super_block
+ return inode;
+ }
+
+-static struct super_operations gadget_fs_operations = {
++static const struct super_operations gadget_fs_operations = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+ };
+diff -urNp linux-2.6.31.7/drivers/usb/gadget/printer.c linux-2.6.31.7/drivers/usb/gadget/printer.c
+--- linux-2.6.31.7/drivers/usb/gadget/printer.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/gadget/printer.c 2009-12-08 17:39:43.974776264 -0500
+@@ -875,7 +875,7 @@ printer_ioctl(struct file *fd, unsigned
+ }
+
+ /* used after endpoint configuration */
+-static struct file_operations printer_io_operations = {
++static const struct file_operations printer_io_operations = {
+ .owner = THIS_MODULE,
+ .open = printer_open,
+ .read = printer_read,
+diff -urNp linux-2.6.31.7/drivers/usb/host/ehci-pci.c linux-2.6.31.7/drivers/usb/host/ehci-pci.c
+--- linux-2.6.31.7/drivers/usb/host/ehci-pci.c 2009-12-08 17:29:51.620561513 -0500
++++ linux-2.6.31.7/drivers/usb/host/ehci-pci.c 2009-12-08 17:39:43.974776264 -0500
+@@ -422,7 +422,7 @@ static const struct pci_device_id pci_id
+ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
+ .driver_data = (unsigned long) &ehci_pci_hc_driver,
+ },
+- { /* end: all zeroes */ }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+ MODULE_DEVICE_TABLE(pci, pci_ids);
+
+diff -urNp linux-2.6.31.7/drivers/usb/host/uhci-hcd.c linux-2.6.31.7/drivers/usb/host/uhci-hcd.c
+--- linux-2.6.31.7/drivers/usb/host/uhci-hcd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/host/uhci-hcd.c 2009-12-08 17:39:43.975775582 -0500
+@@ -927,7 +927,7 @@ static const struct pci_device_id uhci_p
+ /* handle any USB UHCI controller */
+ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
+ .driver_data = (unsigned long) &uhci_driver,
+- }, { /* end: all zeroes */ }
++ }, { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
+diff -urNp linux-2.6.31.7/drivers/usb/host/whci/debug.c linux-2.6.31.7/drivers/usb/host/whci/debug.c
+--- linux-2.6.31.7/drivers/usb/host/whci/debug.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/host/whci/debug.c 2009-12-08 17:39:43.975775582 -0500
+@@ -134,7 +134,7 @@ static int pzl_open(struct inode *inode,
+ return single_open(file, pzl_print, inode->i_private);
+ }
+
+-static struct file_operations di_fops = {
++static const struct file_operations di_fops = {
+ .open = di_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+@@ -142,7 +142,7 @@ static struct file_operations di_fops =
+ .owner = THIS_MODULE,
+ };
+
+-static struct file_operations asl_fops = {
++static const struct file_operations asl_fops = {
+ .open = asl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+@@ -150,7 +150,7 @@ static struct file_operations asl_fops =
+ .owner = THIS_MODULE,
+ };
+
+-static struct file_operations pzl_fops = {
++static const struct file_operations pzl_fops = {
+ .open = pzl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.31.7/drivers/usb/misc/appledisplay.c linux-2.6.31.7/drivers/usb/misc/appledisplay.c
+--- linux-2.6.31.7/drivers/usb/misc/appledisplay.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/misc/appledisplay.c 2009-12-08 17:39:43.976775438 -0500
+@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
+ return pdata->msgdata[1];
+ }
+
+-static struct backlight_ops appledisplay_bl_data = {
++static const struct backlight_ops appledisplay_bl_data = {
+ .get_brightness = appledisplay_bl_get_brightness,
+ .update_status = appledisplay_bl_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/usb/mon/mon_bin.c linux-2.6.31.7/drivers/usb/mon/mon_bin.c
+--- linux-2.6.31.7/drivers/usb/mon/mon_bin.c 2009-12-08 17:29:51.625740868 -0500
++++ linux-2.6.31.7/drivers/usb/mon/mon_bin.c 2009-12-08 17:39:43.976775438 -0500
+@@ -1187,7 +1187,7 @@ static int mon_bin_vma_fault(struct vm_a
+ return 0;
+ }
+
+-static struct vm_operations_struct mon_bin_vm_ops = {
++static const struct vm_operations_struct mon_bin_vm_ops = {
+ .open = mon_bin_vma_open,
+ .close = mon_bin_vma_close,
+ .fault = mon_bin_vma_fault,
+diff -urNp linux-2.6.31.7/drivers/usb/mon/mon_main.c linux-2.6.31.7/drivers/usb/mon/mon_main.c
+--- linux-2.6.31.7/drivers/usb/mon/mon_main.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/mon/mon_main.c 2009-12-08 17:39:43.976775438 -0500
+@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
+ /*
+ * Ops
+ */
+-static struct usb_mon_operations mon_ops_0 = {
++static const struct usb_mon_operations mon_ops_0 = {
+ .urb_submit = mon_submit,
+ .urb_submit_error = mon_submit_error,
+ .urb_complete = mon_complete,
+diff -urNp linux-2.6.31.7/drivers/usb/storage/debug.h linux-2.6.31.7/drivers/usb/storage/debug.h
+--- linux-2.6.31.7/drivers/usb/storage/debug.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/storage/debug.h 2009-12-08 17:39:43.977775901 -0500
+@@ -54,9 +54,9 @@ void usb_stor_show_sense( unsigned char
+ #define US_DEBUGPX(x...) printk( x )
+ #define US_DEBUG(x) x
+ #else
+-#define US_DEBUGP(x...)
+-#define US_DEBUGPX(x...)
+-#define US_DEBUG(x)
++#define US_DEBUGP(x...) do {} while (0)
++#define US_DEBUGPX(x...) do {} while (0)
++#define US_DEBUG(x) do {} while (0)
+ #endif
+
+ #endif
+diff -urNp linux-2.6.31.7/drivers/usb/storage/usb.c linux-2.6.31.7/drivers/usb/storage/usb.c
+--- linux-2.6.31.7/drivers/usb/storage/usb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/storage/usb.c 2009-12-08 17:39:43.977775901 -0500
+@@ -118,7 +118,7 @@ MODULE_PARM_DESC(quirks, "supplemental l
+
+ static struct us_unusual_dev us_unusual_dev_list[] = {
+ # include "unusual_devs.h"
+- { } /* Terminating entry */
++ { NULL, NULL, 0, 0, NULL } /* Terminating entry */
+ };
+
+ #undef UNUSUAL_DEV
+diff -urNp linux-2.6.31.7/drivers/usb/storage/usual-tables.c linux-2.6.31.7/drivers/usb/storage/usual-tables.c
+--- linux-2.6.31.7/drivers/usb/storage/usual-tables.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/usb/storage/usual-tables.c 2009-12-08 17:39:43.977775901 -0500
+@@ -48,7 +48,7 @@
+
+ struct usb_device_id usb_storage_usb_ids[] = {
+ # include "unusual_devs.h"
+- { } /* Terminating entry */
++ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */
+ };
+ EXPORT_SYMBOL_GPL(usb_storage_usb_ids);
+
+diff -urNp linux-2.6.31.7/drivers/uwb/uwb-debug.c linux-2.6.31.7/drivers/uwb/uwb-debug.c
+--- linux-2.6.31.7/drivers/uwb/uwb-debug.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/uwb/uwb-debug.c 2009-12-08 17:39:43.978706469 -0500
+@@ -205,7 +205,7 @@ static ssize_t command_write(struct file
+ return ret < 0 ? ret : len;
+ }
+
+-static struct file_operations command_fops = {
++static const struct file_operations command_fops = {
+ .open = command_open,
+ .write = command_write,
+ .read = NULL,
+@@ -255,7 +255,7 @@ static int reservations_open(struct inod
+ return single_open(file, reservations_print, inode->i_private);
+ }
+
+-static struct file_operations reservations_fops = {
++static const struct file_operations reservations_fops = {
+ .open = reservations_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+@@ -283,7 +283,7 @@ static int drp_avail_open(struct inode *
+ return single_open(file, drp_avail_print, inode->i_private);
+ }
+
+-static struct file_operations drp_avail_fops = {
++static const struct file_operations drp_avail_fops = {
+ .open = drp_avail_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.31.7/drivers/uwb/wlp/messages.c linux-2.6.31.7/drivers/uwb/wlp/messages.c
+--- linux-2.6.31.7/drivers/uwb/wlp/messages.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/uwb/wlp/messages.c 2009-12-08 17:39:43.986525342 -0500
+@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
+ size_t len = skb->len;
+ size_t used;
+ ssize_t result;
+- struct wlp_nonce enonce, rnonce;
++ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
+ enum wlp_assc_error assc_err;
+ char enonce_buf[WLP_WSS_NONCE_STRSIZE];
+ char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
+diff -urNp linux-2.6.31.7/drivers/uwb/wlp/sysfs.c linux-2.6.31.7/drivers/uwb/wlp/sysfs.c
+--- linux-2.6.31.7/drivers/uwb/wlp/sysfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/uwb/wlp/sysfs.c 2009-12-08 17:39:43.986525342 -0500
+@@ -602,8 +602,7 @@ ssize_t wlp_wss_attr_show(struct kobject
+ * Sysfs operation for forwarding write call to the store method of the
+ * attribute owner
+ */
+-static
+-ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
++static ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+ {
+ struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr);
+@@ -615,8 +614,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
+ return ret;
+ }
+
+-static
+-struct sysfs_ops wss_sysfs_ops = {
++static const struct sysfs_ops wss_sysfs_ops = {
+ .show = wlp_wss_attr_show,
+ .store = wlp_wss_attr_store,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/atmel_lcdfb.c linux-2.6.31.7/drivers/video/atmel_lcdfb.c
+--- linux-2.6.31.7/drivers/video/atmel_lcdfb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/atmel_lcdfb.c 2009-12-08 17:39:43.996557315 -0500
+@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
+ return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+ }
+
+-static struct backlight_ops atmel_lcdc_bl_ops = {
++static const struct backlight_ops atmel_lcdc_bl_ops = {
+ .update_status = atmel_bl_update_status,
+ .get_brightness = atmel_bl_get_brightness,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/aty/aty128fb.c linux-2.6.31.7/drivers/video/aty/aty128fb.c
+--- linux-2.6.31.7/drivers/video/aty/aty128fb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/aty/aty128fb.c 2009-12-08 17:39:43.996557315 -0500
+@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
+ return bd->props.brightness;
+ }
+
+-static struct backlight_ops aty128_bl_data = {
++static const struct backlight_ops aty128_bl_data = {
+ .get_brightness = aty128_bl_get_brightness,
+ .update_status = aty128_bl_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/aty/atyfb_base.c linux-2.6.31.7/drivers/video/aty/atyfb_base.c
+--- linux-2.6.31.7/drivers/video/aty/atyfb_base.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/aty/atyfb_base.c 2009-12-08 17:39:43.997777218 -0500
+@@ -2165,7 +2165,7 @@ static int aty_bl_get_brightness(struct
+ return bd->props.brightness;
+ }
+
+-static struct backlight_ops aty_bl_data = {
++static const struct backlight_ops aty_bl_data = {
+ .get_brightness = aty_bl_get_brightness,
+ .update_status = aty_bl_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/aty/radeon_backlight.c linux-2.6.31.7/drivers/video/aty/radeon_backlight.c
+--- linux-2.6.31.7/drivers/video/aty/radeon_backlight.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/aty/radeon_backlight.c 2009-12-08 17:39:43.997777218 -0500
+@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
+ return bd->props.brightness;
+ }
+
+-static struct backlight_ops radeon_bl_data = {
++static const struct backlight_ops radeon_bl_data = {
+ .get_brightness = radeon_bl_get_brightness,
+ .update_status = radeon_bl_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.31.7/drivers/video/backlight/atmel-pwm-bl.c
+--- linux-2.6.31.7/drivers/video/backlight/atmel-pwm-bl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/atmel-pwm-bl.c 2009-12-08 17:39:44.005617849 -0500
+@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
+ return pwm_channel_enable(&pwmbl->pwmc);
+ }
+
+-static struct backlight_ops atmel_pwm_bl_ops = {
++static const struct backlight_ops atmel_pwm_bl_ops = {
+ .get_brightness = atmel_pwm_bl_get_intensity,
+ .update_status = atmel_pwm_bl_set_intensity,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/backlight/backlight.c linux-2.6.31.7/drivers/video/backlight/backlight.c
+--- linux-2.6.31.7/drivers/video/backlight/backlight.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/backlight.c 2009-12-08 17:39:44.005617849 -0500
+@@ -227,7 +227,7 @@ static struct device_attribute bl_device
+ * ERR_PTR() or a pointer to the newly allocated device.
+ */
+ struct backlight_device *backlight_device_register(const char *name,
+- struct device *parent, void *devdata, struct backlight_ops *ops)
++ struct device *parent, void *devdata, const struct backlight_ops *ops)
+ {
+ struct backlight_device *new_bd;
+ int rc;
+diff -urNp linux-2.6.31.7/drivers/video/backlight/corgi_lcd.c linux-2.6.31.7/drivers/video/backlight/corgi_lcd.c
+--- linux-2.6.31.7/drivers/video/backlight/corgi_lcd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/corgi_lcd.c 2009-12-08 17:39:44.005617849 -0500
+@@ -450,7 +450,7 @@ void corgi_lcd_limit_intensity(int limit
+ }
+ EXPORT_SYMBOL(corgi_lcd_limit_intensity);
+
+-static struct backlight_ops corgi_bl_ops = {
++static const struct backlight_ops corgi_bl_ops = {
+ .get_brightness = corgi_bl_get_intensity,
+ .update_status = corgi_bl_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/backlight/cr_bllcd.c linux-2.6.31.7/drivers/video/backlight/cr_bllcd.c
+--- linux-2.6.31.7/drivers/video/backlight/cr_bllcd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/cr_bllcd.c 2009-12-08 17:39:44.006779182 -0500
+@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
+ return intensity;
+ }
+
+-static struct backlight_ops cr_backlight_ops = {
++static const struct backlight_ops cr_backlight_ops = {
+ .get_brightness = cr_backlight_get_intensity,
+ .update_status = cr_backlight_set_intensity,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/backlight/da903x_bl.c linux-2.6.31.7/drivers/video/backlight/da903x_bl.c
+--- linux-2.6.31.7/drivers/video/backlight/da903x_bl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/da903x_bl.c 2009-12-08 17:39:44.006779182 -0500
+@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
+ return data->current_brightness;
+ }
+
+-static struct backlight_ops da903x_backlight_ops = {
++static const struct backlight_ops da903x_backlight_ops = {
+ .update_status = da903x_backlight_update_status,
+ .get_brightness = da903x_backlight_get_brightness,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/backlight/generic_bl.c linux-2.6.31.7/drivers/video/backlight/generic_bl.c
+--- linux-2.6.31.7/drivers/video/backlight/generic_bl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/generic_bl.c 2009-12-08 17:39:44.008774969 -0500
+@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
+ }
+ EXPORT_SYMBOL(corgibl_limit_intensity);
+
+-static struct backlight_ops genericbl_ops = {
++static const struct backlight_ops genericbl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = genericbl_get_intensity,
+ .update_status = genericbl_send_intensity,
+diff -urNp linux-2.6.31.7/drivers/video/backlight/hp680_bl.c linux-2.6.31.7/drivers/video/backlight/hp680_bl.c
+--- linux-2.6.31.7/drivers/video/backlight/hp680_bl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/hp680_bl.c 2009-12-08 17:39:44.009725047 -0500
+@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
+ return current_intensity;
+ }
+
+-static struct backlight_ops hp680bl_ops = {
++static const struct backlight_ops hp680bl_ops = {
+ .get_brightness = hp680bl_get_intensity,
+ .update_status = hp680bl_set_intensity,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/backlight/jornada720_bl.c linux-2.6.31.7/drivers/video/backlight/jornada720_bl.c
+--- linux-2.6.31.7/drivers/video/backlight/jornada720_bl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/jornada720_bl.c 2009-12-08 17:39:44.009725047 -0500
+@@ -93,7 +93,7 @@ out:
+ return ret;
+ }
+
+-static struct backlight_ops jornada_bl_ops = {
++static const struct backlight_ops jornada_bl_ops = {
+ .get_brightness = jornada_bl_get_brightness,
+ .update_status = jornada_bl_update_status,
+ .options = BL_CORE_SUSPENDRESUME,
+diff -urNp linux-2.6.31.7/drivers/video/backlight/kb3886_bl.c linux-2.6.31.7/drivers/video/backlight/kb3886_bl.c
+--- linux-2.6.31.7/drivers/video/backlight/kb3886_bl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/kb3886_bl.c 2009-12-08 17:39:44.009725047 -0500
+@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
+ return kb3886bl_intensity;
+ }
+
+-static struct backlight_ops kb3886bl_ops = {
++static const struct backlight_ops kb3886bl_ops = {
+ .get_brightness = kb3886bl_get_intensity,
+ .update_status = kb3886bl_send_intensity,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/backlight/locomolcd.c linux-2.6.31.7/drivers/video/backlight/locomolcd.c
+--- linux-2.6.31.7/drivers/video/backlight/locomolcd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/locomolcd.c 2009-12-08 17:39:44.010714873 -0500
+@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
+ return current_intensity;
+ }
+
+-static struct backlight_ops locomobl_data = {
++static const struct backlight_ops locomobl_data = {
+ .get_brightness = locomolcd_get_intensity,
+ .update_status = locomolcd_set_intensity,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.31.7/drivers/video/backlight/mbp_nvidia_bl.c
+--- linux-2.6.31.7/drivers/video/backlight/mbp_nvidia_bl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/mbp_nvidia_bl.c 2009-12-08 17:39:44.010714873 -0500
+@@ -33,7 +33,7 @@ struct dmi_match_data {
+ unsigned long iostart;
+ unsigned long iolen;
+ /* Backlight operations structure. */
+- struct backlight_ops backlight_ops;
++ const struct backlight_ops backlight_ops;
+ };
+
+ /* Module parameters. */
+diff -urNp linux-2.6.31.7/drivers/video/backlight/omap1_bl.c linux-2.6.31.7/drivers/video/backlight/omap1_bl.c
+--- linux-2.6.31.7/drivers/video/backlight/omap1_bl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/omap1_bl.c 2009-12-08 17:39:44.010714873 -0500
+@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
+ return bl->current_intensity;
+ }
+
+-static struct backlight_ops omapbl_ops = {
++static const struct backlight_ops omapbl_ops = {
+ .get_brightness = omapbl_get_intensity,
+ .update_status = omapbl_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/backlight/progear_bl.c linux-2.6.31.7/drivers/video/backlight/progear_bl.c
+--- linux-2.6.31.7/drivers/video/backlight/progear_bl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/progear_bl.c 2009-12-08 17:39:44.011665171 -0500
+@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
+ return intensity - HW_LEVEL_MIN;
+ }
+
+-static struct backlight_ops progearbl_ops = {
++static const struct backlight_ops progearbl_ops = {
+ .get_brightness = progearbl_get_intensity,
+ .update_status = progearbl_set_intensity,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/backlight/pwm_bl.c linux-2.6.31.7/drivers/video/backlight/pwm_bl.c
+--- linux-2.6.31.7/drivers/video/backlight/pwm_bl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/pwm_bl.c 2009-12-08 17:39:44.011665171 -0500
+@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
+ return bl->props.brightness;
+ }
+
+-static struct backlight_ops pwm_backlight_ops = {
++static const struct backlight_ops pwm_backlight_ops = {
+ .update_status = pwm_backlight_update_status,
+ .get_brightness = pwm_backlight_get_brightness,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/backlight/tosa_bl.c linux-2.6.31.7/drivers/video/backlight/tosa_bl.c
+--- linux-2.6.31.7/drivers/video/backlight/tosa_bl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/backlight/tosa_bl.c 2009-12-08 17:39:44.012707415 -0500
+@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
+ return props->brightness;
+ }
+
+-static struct backlight_ops bl_ops = {
++static const struct backlight_ops bl_ops = {
+ .get_brightness = tosa_bl_get_brightness,
+ .update_status = tosa_bl_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/bf54x-lq043fb.c linux-2.6.31.7/drivers/video/bf54x-lq043fb.c
+--- linux-2.6.31.7/drivers/video/bf54x-lq043fb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/bf54x-lq043fb.c 2009-12-08 17:39:44.012707415 -0500
+@@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
+ return 0;
+ }
+
+-static struct backlight_ops bfin_lq043fb_bl_ops = {
++static const struct backlight_ops bfin_lq043fb_bl_ops = {
+ .get_brightness = bl_get_brightness,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/video/bfin-t350mcqb-fb.c linux-2.6.31.7/drivers/video/bfin-t350mcqb-fb.c
+--- linux-2.6.31.7/drivers/video/bfin-t350mcqb-fb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/bfin-t350mcqb-fb.c 2009-12-08 17:39:44.012707415 -0500
+@@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
+ return 0;
+ }
+
+-static struct backlight_ops bfin_lq043fb_bl_ops = {
++static const struct backlight_ops bfin_lq043fb_bl_ops = {
+ .get_brightness = bl_get_brightness,
+ };
+
+diff -urNp linux-2.6.31.7/drivers/video/fb_defio.c linux-2.6.31.7/drivers/video/fb_defio.c
+--- linux-2.6.31.7/drivers/video/fb_defio.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/fb_defio.c 2009-12-08 17:39:44.013747583 -0500
+@@ -125,7 +125,7 @@ page_already_added:
+ return 0;
+ }
+
+-static struct vm_operations_struct fb_deferred_io_vm_ops = {
++static const struct vm_operations_struct fb_deferred_io_vm_ops = {
+ .fault = fb_deferred_io_fault,
+ .page_mkwrite = fb_deferred_io_mkwrite,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/fbmem.c linux-2.6.31.7/drivers/video/fbmem.c
+--- linux-2.6.31.7/drivers/video/fbmem.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/fbmem.c 2009-12-08 17:39:44.013747583 -0500
+@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
+ image->dx += image->width + 8;
+ }
+ } else if (rotate == FB_ROTATE_UD) {
+- for (x = 0; x < num && image->dx >= 0; x++) {
++ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dx -= image->width + 8;
+ }
+@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
+ image->dy += image->height + 8;
+ }
+ } else if (rotate == FB_ROTATE_CCW) {
+- for (x = 0; x < num && image->dy >= 0; x++) {
++ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dy -= image->height + 8;
+ }
+@@ -1108,7 +1108,7 @@ static long do_fb_ioctl(struct fb_info *
+ return -EFAULT;
+ if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
+ return -EINVAL;
+- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
++ if (con2fb.framebuffer >= FB_MAX)
+ return -EINVAL;
+ if (!registered_fb[con2fb.framebuffer])
+ request_module("fb%d", con2fb.framebuffer);
+diff -urNp linux-2.6.31.7/drivers/video/fbmon.c linux-2.6.31.7/drivers/video/fbmon.c
+--- linux-2.6.31.7/drivers/video/fbmon.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/fbmon.c 2009-12-08 17:39:44.014714054 -0500
+@@ -45,7 +45,7 @@
+ #ifdef DEBUG
+ #define DPRINTK(fmt, args...) printk(fmt,## args)
+ #else
+-#define DPRINTK(fmt, args...)
++#define DPRINTK(fmt, args...) do {} while (0)
+ #endif
+
+ #define FBMON_FIX_HEADER 1
+diff -urNp linux-2.6.31.7/drivers/video/i810/i810_accel.c linux-2.6.31.7/drivers/video/i810/i810_accel.c
+--- linux-2.6.31.7/drivers/video/i810/i810_accel.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/i810/i810_accel.c 2009-12-08 17:39:44.024785415 -0500
+@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
+ }
+ }
+ printk("ringbuffer lockup!!!\n");
++ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
+ i810_report_error(mmio);
+ par->dev_flags |= LOCKUP;
+ info->pixmap.scan_align = 1;
+diff -urNp linux-2.6.31.7/drivers/video/i810/i810_main.c linux-2.6.31.7/drivers/video/i810/i810_main.c
+--- linux-2.6.31.7/drivers/video/i810/i810_main.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/i810/i810_main.c 2009-12-08 17:39:44.036719872 -0500
+@@ -120,7 +120,7 @@ static struct pci_device_id i810fb_pci_t
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+- { 0 },
++ { 0, 0, 0, 0, 0, 0, 0 },
+ };
+
+ static struct pci_driver i810fb_driver = {
+diff -urNp linux-2.6.31.7/drivers/video/modedb.c linux-2.6.31.7/drivers/video/modedb.c
+--- linux-2.6.31.7/drivers/video/modedb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/modedb.c 2009-12-08 17:39:44.037787004 -0500
+@@ -38,240 +38,240 @@ static const struct fb_videomode modedb[
+ {
+ /* 640x400 @ 70 Hz, 31.5 kHz hsync */
+ NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 640x480 @ 60 Hz, 31.5 kHz hsync */
+ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 800x600 @ 56 Hz, 35.15 kHz hsync */
+ NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */
+ NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8,
+- 0, FB_VMODE_INTERLACED
++ 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 640x400 @ 85 Hz, 37.86 kHz hsync */
+ NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3,
+- FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 640x480 @ 72 Hz, 36.5 kHz hsync */
+ NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 640x480 @ 75 Hz, 37.50 kHz hsync */
+ NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 800x600 @ 60 Hz, 37.8 kHz hsync */
+ NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4,
+- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 640x480 @ 85 Hz, 43.27 kHz hsync */
+ NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */
+ NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10,
+- 0, FB_VMODE_INTERLACED
++ 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 800x600 @ 72 Hz, 48.0 kHz hsync */
+ NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
+- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1024x768 @ 60 Hz, 48.4 kHz hsync */
+ NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 640x480 @ 100 Hz, 53.01 kHz hsync */
+ NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1152x864 @ 60 Hz, 53.5 kHz hsync */
+ NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 800x600 @ 85 Hz, 55.84 kHz hsync */
+ NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1024x768 @ 70 Hz, 56.5 kHz hsync */
+ NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */
+ NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12,
+- 0, FB_VMODE_INTERLACED
++ 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 800x600 @ 100 Hz, 64.02 kHz hsync */
+ NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1024x768 @ 76 Hz, 62.5 kHz hsync */
+ NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1152x864 @ 70 Hz, 62.4 kHz hsync */
+ NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1280x1024 @ 61 Hz, 64.2 kHz hsync */
+ NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1400x1050 @ 60Hz, 63.9 kHz hsync */
+ NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/
+ NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13,
+- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1400x1050 @ 60 Hz, ? kHz +hsync +vsync*/
+ NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3,
+- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1024x768 @ 85 Hz, 70.24 kHz hsync */
+ NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1152x864 @ 78 Hz, 70.8 kHz hsync */
+ NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1280x1024 @ 70 Hz, 74.59 kHz hsync */
+ NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1600x1200 @ 60Hz, 75.00 kHz hsync */
+ NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
+- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1152x864 @ 84 Hz, 76.0 kHz hsync */
+ NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1280x1024 @ 74 Hz, 78.85 kHz hsync */
+ NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1024x768 @ 100Hz, 80.21 kHz hsync */
+ NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1280x1024 @ 76 Hz, 81.13 kHz hsync */
+ NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1600x1200 @ 70 Hz, 87.50 kHz hsync */
+ NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1152x864 @ 100 Hz, 89.62 kHz hsync */
+ NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1280x1024 @ 85 Hz, 91.15 kHz hsync */
+ NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
+- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1600x1200 @ 75 Hz, 93.75 kHz hsync */
+ NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
+- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1680x1050 @ 60 Hz, 65.191 kHz hsync */
+ NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6,
+- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1600x1200 @ 85 Hz, 105.77 kHz hsync */
+ NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3,
+- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1280x1024 @ 100 Hz, 107.16 kHz hsync */
+ NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1800x1440 @ 64Hz, 96.15 kHz hsync */
+ NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3,
+- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1800x1440 @ 70Hz, 104.52 kHz hsync */
+ NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3,
+- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 512x384 @ 78 Hz, 31.50 kHz hsync */
+ NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 512x384 @ 85 Hz, 34.38 kHz hsync */
+ NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */
+ NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1,
+- 0, FB_VMODE_DOUBLE
++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */
+ NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1,
+- 0, FB_VMODE_DOUBLE
++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 320x240 @ 72 Hz, 36.5 kHz hsync */
+ NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2,
+- 0, FB_VMODE_DOUBLE
++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */
+ NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1,
+- 0, FB_VMODE_DOUBLE
++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 400x300 @ 60 Hz, 37.8 kHz hsync */
+ NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2,
+- 0, FB_VMODE_DOUBLE
++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 400x300 @ 72 Hz, 48.0 kHz hsync */
+ NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3,
+- 0, FB_VMODE_DOUBLE
++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 480x300 @ 56 Hz, 35.2 kHz hsync, 8:5 aspect ratio */
+ NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1,
+- 0, FB_VMODE_DOUBLE
++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 480x300 @ 60 Hz, 37.8 kHz hsync */
+ NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2,
+- 0, FB_VMODE_DOUBLE
++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 480x300 @ 63 Hz, 39.6 kHz hsync */
+ NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2,
+- 0, FB_VMODE_DOUBLE
++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 480x300 @ 72 Hz, 48.0 kHz hsync */
+ NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3,
+- 0, FB_VMODE_DOUBLE
++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
+ NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+- FB_VMODE_NONINTERLACED
++ FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */
+ NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6,
+- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */
+ NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */
+ NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3,
+- 0, FB_VMODE_NONINTERLACED
++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
+ NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5,
+- 0, FB_VMODE_INTERLACED
++ 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
+ }, {
+ /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
+ NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5,
+- 0, FB_VMODE_INTERLACED
++ 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
+ },
+ };
+
+diff -urNp linux-2.6.31.7/drivers/video/nvidia/nv_backlight.c linux-2.6.31.7/drivers/video/nvidia/nv_backlight.c
+--- linux-2.6.31.7/drivers/video/nvidia/nv_backlight.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/nvidia/nv_backlight.c 2009-12-08 17:39:44.037787004 -0500
+@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
+ return bd->props.brightness;
+ }
+
+-static struct backlight_ops nvidia_bl_ops = {
++static const struct backlight_ops nvidia_bl_ops = {
+ .get_brightness = nvidia_bl_get_brightness,
+ .update_status = nvidia_bl_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/omap/dispc.c linux-2.6.31.7/drivers/video/omap/dispc.c
+--- linux-2.6.31.7/drivers/video/omap/dispc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/omap/dispc.c 2009-12-08 17:39:44.037787004 -0500
+@@ -1013,7 +1013,7 @@ static void mmap_user_close(struct vm_ar
+ atomic_dec(&dispc.map_count[plane]);
+ }
+
+-static struct vm_operations_struct mmap_user_ops = {
++static const struct vm_operations_struct mmap_user_ops = {
+ .open = mmap_user_open,
+ .close = mmap_user_close,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/riva/fbdev.c linux-2.6.31.7/drivers/video/riva/fbdev.c
+--- linux-2.6.31.7/drivers/video/riva/fbdev.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/riva/fbdev.c 2009-12-08 17:39:44.038658181 -0500
+@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
+ return bd->props.brightness;
+ }
+
+-static struct backlight_ops riva_bl_ops = {
++static const struct backlight_ops riva_bl_ops = {
+ .get_brightness = riva_bl_get_brightness,
+ .update_status = riva_bl_update_status,
+ };
+diff -urNp linux-2.6.31.7/drivers/video/uvesafb.c linux-2.6.31.7/drivers/video/uvesafb.c
+--- linux-2.6.31.7/drivers/video/uvesafb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/uvesafb.c 2009-12-08 17:39:44.038658181 -0500
+@@ -18,6 +18,7 @@
+ #include <linux/fb.h>
+ #include <linux/io.h>
+ #include <linux/mutex.h>
++#include <linux/moduleloader.h>
+ #include <video/edid.h>
+ #include <video/uvesafb.h>
+ #ifdef CONFIG_X86
+@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
+ NULL,
+ };
+
+- return call_usermodehelper(v86d_path, argv, envp, 1);
++ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
+ }
+
+ /*
+@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
+ if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
+ par->pmi_setpal = par->ypan = 0;
+ } else {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_MODULES
++ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
++#endif
++ if (!par->pmi_code) {
++ par->pmi_setpal = par->ypan = 0;
++ return 0;
++ }
++#endif
++
+ par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
+ + task->t.regs.edi);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pax_open_kernel();
++ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
++ pax_close_kernel();
++
++ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
++ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
++#else
+ par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
+ par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
++#endif
++
+ printk(KERN_INFO "uvesafb: protected mode interface info at "
+ "%04x:%04x\n",
+ (u16)task->t.regs.es, (u16)task->t.regs.edi);
+@@ -1827,6 +1850,11 @@ out:
+ if (par->vbe_modes)
+ kfree(par->vbe_modes);
+
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ if (par->pmi_code)
++ module_free_exec(NULL, par->pmi_code);
++#endif
++
+ framebuffer_release(info);
+ return err;
+ }
+@@ -1853,6 +1881,12 @@ static int uvesafb_remove(struct platfor
+ kfree(par->vbe_state_orig);
+ if (par->vbe_state_saved)
+ kfree(par->vbe_state_saved);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ if (par->pmi_code)
++ module_free_exec(NULL, par->pmi_code);
++#endif
++
+ }
+
+ framebuffer_release(info);
+diff -urNp linux-2.6.31.7/drivers/video/vesafb.c linux-2.6.31.7/drivers/video/vesafb.c
+--- linux-2.6.31.7/drivers/video/vesafb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/video/vesafb.c 2009-12-08 17:39:44.039684633 -0500
+@@ -9,6 +9,7 @@
+ */
+
+ #include <linux/module.h>
++#include <linux/moduleloader.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/string.h>
+@@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
+ static int vram_total __initdata; /* Set total amount of memory */
+ static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
+ static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
+-static void (*pmi_start)(void) __read_mostly;
+-static void (*pmi_pal) (void) __read_mostly;
++static void (*pmi_start)(void) __read_only;
++static void (*pmi_pal) (void) __read_only;
+ static int depth __read_mostly;
+ static int vga_compat __read_mostly;
+ /* --------------------------------------------------------------------- */
+@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
+ unsigned int size_vmode;
+ unsigned int size_remap;
+ unsigned int size_total;
++ void *pmi_code = NULL;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
+ return -ENODEV;
+@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
+ size_remap = size_total;
+ vesafb_fix.smem_len = size_remap;
+
+-#ifndef __i386__
+- screen_info.vesapm_seg = 0;
+-#endif
+-
+ if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
+ printk(KERN_WARNING
+ "vesafb: cannot reserve video memory at 0x%lx\n",
+@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
+ printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+ vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
+
++#ifdef __i386__
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pmi_code = module_alloc_exec(screen_info.vesapm_size);
++ if (!pmi_code)
++#elif !defined(CONFIG_PAX_KERNEXEC)
++ if (0)
++#endif
++
++#endif
++ screen_info.vesapm_seg = 0;
++
+ if (screen_info.vesapm_seg) {
+- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
+- screen_info.vesapm_seg,screen_info.vesapm_off);
++ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
++ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
+ }
+
+ if (screen_info.vesapm_seg < 0xc000)
+@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
+
+ if (ypan || pmi_setpal) {
+ unsigned short *pmi_base;
+- pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
+- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
+- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
++
++ pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pax_open_kernel();
++ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
++#else
++ pmi_code = pmi_base;
++#endif
++
++ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
++ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pmi_start = ktva_ktla(pmi_start);
++ pmi_pal = ktva_ktla(pmi_pal);
++ pax_close_kernel();
++#endif
++
+ printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
+ if (pmi_base[3]) {
+ printk(KERN_INFO "vesafb: pmi: ports = ");
+@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
+ info->node, info->fix.id);
+ return 0;
+ err:
++
++#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ module_free_exec(NULL, pmi_code);
++#endif
++
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ framebuffer_release(info);
+diff -urNp linux-2.6.31.7/drivers/xen/sys-hypervisor.c linux-2.6.31.7/drivers/xen/sys-hypervisor.c
+--- linux-2.6.31.7/drivers/xen/sys-hypervisor.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/drivers/xen/sys-hypervisor.c 2009-12-08 17:39:44.039684633 -0500
+@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
+ return 0;
+ }
+
+-static struct sysfs_ops hyp_sysfs_ops = {
++static const struct sysfs_ops hyp_sysfs_ops = {
+ .show = hyp_sysfs_show,
+ .store = hyp_sysfs_store,
+ };
+diff -urNp linux-2.6.31.7/fs/9p/vfs_inode.c linux-2.6.31.7/fs/9p/vfs_inode.c
+--- linux-2.6.31.7/fs/9p/vfs_inode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/9p/vfs_inode.c 2009-12-08 17:39:44.039684633 -0500
+@@ -1025,7 +1025,7 @@ static void *v9fs_vfs_follow_link(struct
+ static void
+ v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+
+ P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
+ IS_ERR(s) ? "<error>" : s);
+diff -urNp linux-2.6.31.7/fs/afs/flock.c linux-2.6.31.7/fs/afs/flock.c
+--- linux-2.6.31.7/fs/afs/flock.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/afs/flock.c 2009-12-08 17:39:44.040710543 -0500
+@@ -21,7 +21,7 @@ static void afs_fl_release_private(struc
+ static struct workqueue_struct *afs_lock_manager;
+ static DEFINE_MUTEX(afs_lock_manager_mutex);
+
+-static struct file_lock_operations afs_lock_ops = {
++static const struct file_lock_operations afs_lock_ops = {
+ .fl_copy_lock = afs_fl_copy_lock,
+ .fl_release_private = afs_fl_release_private,
+ };
+diff -urNp linux-2.6.31.7/fs/afs/proc.c linux-2.6.31.7/fs/afs/proc.c
+--- linux-2.6.31.7/fs/afs/proc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/afs/proc.c 2009-12-08 17:39:44.040710543 -0500
+@@ -28,7 +28,7 @@ static int afs_proc_cells_show(struct se
+ static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *_pos);
+
+-static struct seq_operations afs_proc_cells_ops = {
++static const struct seq_operations afs_proc_cells_ops = {
+ .start = afs_proc_cells_start,
+ .next = afs_proc_cells_next,
+ .stop = afs_proc_cells_stop,
+@@ -70,7 +70,7 @@ static void *afs_proc_cell_volumes_next(
+ static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v);
+ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v);
+
+-static struct seq_operations afs_proc_cell_volumes_ops = {
++static const struct seq_operations afs_proc_cell_volumes_ops = {
+ .start = afs_proc_cell_volumes_start,
+ .next = afs_proc_cell_volumes_next,
+ .stop = afs_proc_cell_volumes_stop,
+@@ -95,7 +95,7 @@ static void *afs_proc_cell_vlservers_nex
+ static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v);
+ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v);
+
+-static struct seq_operations afs_proc_cell_vlservers_ops = {
++static const struct seq_operations afs_proc_cell_vlservers_ops = {
+ .start = afs_proc_cell_vlservers_start,
+ .next = afs_proc_cell_vlservers_next,
+ .stop = afs_proc_cell_vlservers_stop,
+@@ -119,7 +119,7 @@ static void *afs_proc_cell_servers_next(
+ static void afs_proc_cell_servers_stop(struct seq_file *p, void *v);
+ static int afs_proc_cell_servers_show(struct seq_file *m, void *v);
+
+-static struct seq_operations afs_proc_cell_servers_ops = {
++static const struct seq_operations afs_proc_cell_servers_ops = {
+ .start = afs_proc_cell_servers_start,
+ .next = afs_proc_cell_servers_next,
+ .stop = afs_proc_cell_servers_stop,
+diff -urNp linux-2.6.31.7/fs/aio.c linux-2.6.31.7/fs/aio.c
+--- linux-2.6.31.7/fs/aio.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/aio.c 2009-12-08 17:39:44.041787673 -0500
+@@ -114,7 +114,7 @@ static int aio_setup_ring(struct kioctx
+ size += sizeof(struct io_event) * nr_events;
+ nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+- if (nr_pages < 0)
++ if (nr_pages <= 0)
+ return -EINVAL;
+
+ nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
+diff -urNp linux-2.6.31.7/fs/autofs/root.c linux-2.6.31.7/fs/autofs/root.c
+--- linux-2.6.31.7/fs/autofs/root.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/autofs/root.c 2009-12-08 17:39:44.041787673 -0500
+@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
+ set_bit(n,sbi->symlink_bitmap);
+ sl = &sbi->symlink[n];
+ sl->len = strlen(symname);
+- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
++ slsize = sl->len+1;
++ sl->data = kmalloc(slsize, GFP_KERNEL);
+ if (!sl->data) {
+ clear_bit(n,sbi->symlink_bitmap);
+ unlock_kernel();
+diff -urNp linux-2.6.31.7/fs/autofs4/symlink.c linux-2.6.31.7/fs/autofs4/symlink.c
+--- linux-2.6.31.7/fs/autofs4/symlink.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/autofs4/symlink.c 2009-12-08 17:39:44.041787673 -0500
+@@ -15,7 +15,7 @@
+ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
+ {
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+- nd_set_link(nd, (char *)ino->u.symlink);
++ nd_set_link(nd, ino->u.symlink);
+ return NULL;
+ }
+
+diff -urNp linux-2.6.31.7/fs/befs/linuxvfs.c linux-2.6.31.7/fs/befs/linuxvfs.c
+--- linux-2.6.31.7/fs/befs/linuxvfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/befs/linuxvfs.c 2009-12-08 17:39:44.042782238 -0500
+@@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
+ {
+ befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+ if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
+- char *link = nd_get_link(nd);
++ const char *link = nd_get_link(nd);
+ if (!IS_ERR(link))
+ kfree(link);
+ }
+diff -urNp linux-2.6.31.7/fs/binfmt_aout.c linux-2.6.31.7/fs/binfmt_aout.c
+--- linux-2.6.31.7/fs/binfmt_aout.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/binfmt_aout.c 2009-12-08 17:39:44.042782238 -0500
+@@ -16,6 +16,7 @@
+ #include <linux/string.h>
+ #include <linux/fs.h>
+ #include <linux/file.h>
++#include <linux/security.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+ #include <linux/ptrace.h>
+@@ -113,10 +114,12 @@ static int aout_core_dump(long signr, st
+
+ /* If the size of the dump file exceeds the rlimit, then see what would happen
+ if we wrote the stack, but not the data area. */
++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
+ if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
+ dump.u_dsize = 0;
+
+ /* Make sure we have enough room to write the stack and data areas. */
++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
+ if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
+ dump.u_ssize = 0;
+
+@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
+ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim >= RLIM_INFINITY)
+ rlim = ~0;
++
++ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
+ if (ex.a_data + ex.a_bss > rlim)
+ return -ENOMEM;
+
+@@ -276,6 +281,27 @@ static int load_aout_binary(struct linux
+ install_exec_creds(bprm);
+ current->flags &= ~PF_FORKNOEXEC;
+
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
++ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
++ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
++ current->mm->pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++ }
++#endif
++
+ if (N_MAGIC(ex) == OMAGIC) {
+ unsigned long text_addr, map_size;
+ loff_t pos;
+@@ -348,7 +374,7 @@ static int load_aout_binary(struct linux
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+- PROT_READ | PROT_WRITE | PROT_EXEC,
++ PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ fd_offset + ex.a_text);
+ up_write(&current->mm->mmap_sem);
+diff -urNp linux-2.6.31.7/fs/binfmt_elf.c linux-2.6.31.7/fs/binfmt_elf.c
+--- linux-2.6.31.7/fs/binfmt_elf.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/binfmt_elf.c 2009-12-08 17:39:44.043665858 -0500
+@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
+ #define elf_core_dump NULL
+ #endif
+
++#ifdef CONFIG_PAX_MPROTECT
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
++#endif
++
+ #if ELF_EXEC_PAGESIZE > PAGE_SIZE
+ #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
+ #else
+@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
+ .load_binary = load_elf_binary,
+ .load_shlib = load_elf_library,
+ .core_dump = elf_core_dump,
++
++#ifdef CONFIG_PAX_MPROTECT
++ .handle_mprotect= elf_handle_mprotect,
++#endif
++
+ .min_coredump = ELF_EXEC_PAGESIZE,
+ .hasvdso = 1
+ };
+@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
+
+ static int set_brk(unsigned long start, unsigned long end)
+ {
++ unsigned long e = end;
++
+ start = ELF_PAGEALIGN(start);
+ end = ELF_PAGEALIGN(end);
+ if (end > start) {
+@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
+ if (BAD_ADDR(addr))
+ return addr;
+ }
+- current->mm->start_brk = current->mm->brk = end;
++ current->mm->start_brk = current->mm->brk = e;
+ return 0;
+ }
+
+@@ -148,7 +159,7 @@ create_elf_tables(struct linux_binprm *b
+ elf_addr_t __user *u_rand_bytes;
+ const char *k_platform = ELF_PLATFORM;
+ const char *k_base_platform = ELF_BASE_PLATFORM;
+- unsigned char k_rand_bytes[16];
++ u32 k_rand_bytes[4];
+ int items;
+ elf_addr_t *elf_info;
+ int ei_index = 0;
+@@ -195,6 +206,10 @@ create_elf_tables(struct linux_binprm *b
+ * Generate 16 random bytes for userspace PRNG seeding.
+ */
+ get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
++ srandom32(k_rand_bytes[0] ^ random32());
++ srandom32(k_rand_bytes[1] ^ random32());
++ srandom32(k_rand_bytes[2] ^ random32());
++ srandom32(k_rand_bytes[3] ^ random32());
+ u_rand_bytes = (elf_addr_t __user *)
+ STACK_ALLOC(p, sizeof(k_rand_bytes));
+ if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+@@ -385,10 +400,10 @@ static unsigned long load_elf_interp(str
+ {
+ struct elf_phdr *elf_phdata;
+ struct elf_phdr *eppnt;
+- unsigned long load_addr = 0;
++ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
+ int load_addr_set = 0;
+ unsigned long last_bss = 0, elf_bss = 0;
+- unsigned long error = ~0UL;
++ unsigned long error = -EINVAL;
+ unsigned long total_size;
+ int retval, i, size;
+
+@@ -434,6 +449,11 @@ static unsigned long load_elf_interp(str
+ goto out_close;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
+ eppnt = elf_phdata;
+ for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
+ if (eppnt->p_type == PT_LOAD) {
+@@ -477,8 +497,8 @@ static unsigned long load_elf_interp(str
+ k = load_addr + eppnt->p_vaddr;
+ if (BAD_ADDR(k) ||
+ eppnt->p_filesz > eppnt->p_memsz ||
+- eppnt->p_memsz > TASK_SIZE ||
+- TASK_SIZE - eppnt->p_memsz < k) {
++ eppnt->p_memsz > pax_task_size ||
++ pax_task_size - eppnt->p_memsz < k) {
+ error = -ENOMEM;
+ goto out_close;
+ }
+@@ -532,6 +552,177 @@ out:
+ return error;
+ }
+
++#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
++static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (elf_phdata->p_flags & PF_PAGEEXEC)
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (elf_phdata->p_flags & PF_SEGMEXEC)
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if (nx_enabled)
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (elf_phdata->p_flags & PF_EMUTRAMP)
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (elf_phdata->p_flags & PF_MPROTECT)
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if (nx_enabled)
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_EI_PAX
++static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if (nx_enabled)
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
++static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ unsigned long i;
++#endif
++
++#ifdef CONFIG_PAX_EI_PAX
++ pax_flags = pax_parse_ei_pax(elf_ex);
++#endif
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ for (i = 0UL; i < elf_ex->e_phnum; i++)
++ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
++ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
++ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
++ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
++ return -EINVAL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ pax_flags = pax_parse_softmode(&elf_phdata[i]);
++ else
++#endif
++
++ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
++ break;
++ }
++#endif
++
++ if (0 > pax_check_flags(&pax_flags))
++ return -EINVAL;
++
++ current->mm->pax_flags = pax_flags;
++ return 0;
++}
++#endif
++
+ /*
+ * These are the functions used to load ELF style executables and shared
+ * libraries. There is no binary dependent code anywhere else.
+@@ -548,6 +739,11 @@ static unsigned long randomize_stack_top
+ {
+ unsigned int random_variable = 0;
+
++#ifdef CONFIG_PAX_RANDUSTACK
++ if (randomize_va_space)
++ return stack_top - current->mm->delta_stack;
++#endif
++
+ if ((current->flags & PF_RANDOMIZE) &&
+ !(current->personality & ADDR_NO_RANDOMIZE)) {
+ random_variable = get_random_int() & STACK_RND_MASK;
+@@ -566,7 +762,7 @@ static int load_elf_binary(struct linux_
+ unsigned long load_addr = 0, load_bias = 0;
+ int load_addr_set = 0;
+ char * elf_interpreter = NULL;
+- unsigned long error;
++ unsigned long error = 0;
+ struct elf_phdr *elf_ppnt, *elf_phdata;
+ unsigned long elf_bss, elf_brk;
+ int retval, i;
+@@ -576,11 +772,11 @@ static int load_elf_binary(struct linux_
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long reloc_func_desc = 0;
+ int executable_stack = EXSTACK_DEFAULT;
+- unsigned long def_flags = 0;
+ struct {
+ struct elfhdr elf_ex;
+ struct elfhdr interp_elf_ex;
+ } *loc;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ loc = kmalloc(sizeof(*loc), GFP_KERNEL);
+ if (!loc) {
+@@ -742,11 +938,80 @@ static int load_elf_binary(struct linux_
+
+ /* OK, This is the point of no return */
+ current->flags &= ~PF_FORKNOEXEC;
+- current->mm->def_flags = def_flags;
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ current->mm->call_dl_resolve = 0UL;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ current->mm->call_syscall = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ current->mm->delta_mmap = 0UL;
++ current->mm->delta_stack = 0UL;
++#endif
++
++ current->mm->def_flags = 0;
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
++ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
++ send_sig(SIGKILL, current, 0);
++ goto out_free_dentry;
++ }
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++ pax_set_initial_flags(bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++ if (pax_set_initial_flags_func)
++ (pax_set_initial_flags_func)(bprm);
++#endif
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
++ current->mm->context.user_cs_limit = PAGE_SIZE;
++ current->mm->def_flags |= VM_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
++ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++ }
++#endif
++
++#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
++ put_cpu();
++ }
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
++ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
++ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
++ }
++#endif
+
+ /* Do this immediately, since STACK_TOP as used in setup_arg_pages
+ may depend on the personality. */
+ SET_PERSONALITY(loc->elf_ex);
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ executable_stack = EXSTACK_DISABLE_X;
++ current->personality &= ~READ_IMPLIES_EXEC;
++ } else
++#endif
++
+ if (elf_read_implies_exec(loc->elf_ex, executable_stack))
+ current->personality |= READ_IMPLIES_EXEC;
+
+@@ -827,6 +1092,20 @@ static int load_elf_binary(struct linux_
+ #else
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ /* PaX: randomize base address at the default exe base if requested */
++ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
++#ifdef CONFIG_SPARC64
++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
++#else
++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
++#endif
++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
++ elf_flags |= MAP_FIXED;
++ }
++#endif
++
+ }
+
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+@@ -859,9 +1138,9 @@ static int load_elf_binary(struct linux_
+ * allowed task size. Note that p_filesz must always be
+ * <= p_memsz so it is only necessary to check p_memsz.
+ */
+- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+- elf_ppnt->p_memsz > TASK_SIZE ||
+- TASK_SIZE - elf_ppnt->p_memsz < k) {
++ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
++ elf_ppnt->p_memsz > pax_task_size ||
++ pax_task_size - elf_ppnt->p_memsz < k) {
+ /* set_brk can never work. Avoid overflows. */
+ send_sig(SIGKILL, current, 0);
+ retval = -EINVAL;
+@@ -889,6 +1168,11 @@ static int load_elf_binary(struct linux_
+ start_data += load_bias;
+ end_data += load_bias;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
++ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
++#endif
++
+ /* Calling set_brk effectively mmaps the pages that we need
+ * for the bss and break sections. We must do this before
+ * mapping in the interpreter, to make sure it doesn't wind
+@@ -900,9 +1184,11 @@ static int load_elf_binary(struct linux_
+ goto out_free_dentry;
+ }
+ if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
+- send_sig(SIGSEGV, current, 0);
+- retval = -EFAULT; /* Nobody gets to see this, but.. */
+- goto out_free_dentry;
++ /*
++ * This bss-zeroing can fail if the ELF
++ * file specifies odd protections. So
++ * we don't check the return value
++ */
+ }
+
+ if (elf_interpreter) {
+@@ -1135,8 +1421,10 @@ static int dump_seek(struct file *file,
+ unsigned long n = off;
+ if (n > PAGE_SIZE)
+ n = PAGE_SIZE;
+- if (!dump_write(file, buf, n))
++ if (!dump_write(file, buf, n)) {
++ free_page((unsigned long)buf);
+ return 0;
++ }
+ off -= n;
+ }
+ free_page((unsigned long)buf);
+@@ -1148,7 +1436,7 @@ static int dump_seek(struct file *file,
+ * Decide what to dump of a segment, part, all or none.
+ */
+ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+- unsigned long mm_flags)
++ unsigned long mm_flags, long signr)
+ {
+ #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
+
+@@ -1182,7 +1470,7 @@ static unsigned long vma_dump_size(struc
+ if (vma->vm_file == NULL)
+ return 0;
+
+- if (FILTER(MAPPED_PRIVATE))
++ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
+ goto whole;
+
+ /*
+@@ -1278,8 +1566,11 @@ static int writenote(struct memelfnote *
+ #undef DUMP_WRITE
+
+ #define DUMP_WRITE(addr, nr) \
++ do { \
++ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
+ if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
+- goto end_coredump;
++ goto end_coredump; \
++ } while (0);
+ #define DUMP_SEEK(off) \
+ if (!dump_seek(file, (off))) \
+ goto end_coredump;
+@@ -1411,9 +1702,9 @@ static void fill_auxv_note(struct memelf
+ {
+ elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
+ int i = 0;
+- do
++ do {
+ i += 2;
+- while (auxv[i - 2] != AT_NULL);
++ } while (auxv[i - 2] != AT_NULL);
+ fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ }
+
+@@ -1991,7 +2282,7 @@ static int elf_core_dump(long signr, str
+ phdr.p_offset = offset;
+ phdr.p_vaddr = vma->vm_start;
+ phdr.p_paddr = 0;
+- phdr.p_filesz = vma_dump_size(vma, mm_flags);
++ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
+ phdr.p_memsz = vma->vm_end - vma->vm_start;
+ offset += phdr.p_filesz;
+ phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
+@@ -2023,7 +2314,7 @@ static int elf_core_dump(long signr, str
+ unsigned long addr;
+ unsigned long end;
+
+- end = vma->vm_start + vma_dump_size(vma, mm_flags);
++ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
+
+ for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
+ struct page *page;
+@@ -2043,6 +2334,7 @@ static int elf_core_dump(long signr, str
+ flush_cache_page(tmp_vma, addr,
+ page_to_pfn(page));
+ kaddr = kmap(page);
++ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
+ if ((size += PAGE_SIZE) > limit ||
+ !dump_write(file, kaddr,
+ PAGE_SIZE)) {
+@@ -2073,6 +2365,97 @@ out:
+
+ #endif /* USE_ELF_CORE_DUMP */
+
++#ifdef CONFIG_PAX_MPROTECT
++/* PaX: non-PIC ELF libraries need relocations on their executable segments,
++ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
++ * we'll remove VM_MAYWRITE for good on RELRO segments.
++ *
++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
++ * basis because we want to allow the common case and not the special ones.
++ */
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
++{
++ struct elfhdr elf_h;
++ struct elf_phdr elf_p;
++ unsigned long i;
++ unsigned long oldflags;
++ bool is_textrel_rw, is_textrel_rx, is_relro;
++
++ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
++ return;
++
++ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
++ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
++
++#ifdef CONFIG_PAX_NOELFRELOCS
++ is_textrel_rw = false;
++ is_textrel_rx = false;
++#else
++ /* possible TEXTREL */
++ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
++ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
++#endif
++
++ /* possible RELRO */
++ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
++
++ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
++ return;
++
++ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++
++#ifdef CONFIG_PAX_ETEXECRELOCS
++ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++#else
++ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
++#endif
++
++ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++ !elf_check_arch(&elf_h) ||
++ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
++ return;
++
++ for (i = 0UL; i < elf_h.e_phnum; i++) {
++ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
++ return;
++ switch (elf_p.p_type) {
++ case PT_DYNAMIC:
++ if (!is_textrel_rw && !is_textrel_rx)
++ continue;
++ i = 0UL;
++ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
++ elf_dyn dyn;
++
++ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
++ return;
++ if (dyn.d_tag == DT_NULL)
++ return;
++ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
++ gr_log_textrel(vma);
++ if (is_textrel_rw)
++ vma->vm_flags |= VM_MAYWRITE;
++ else
++ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
++ vma->vm_flags &= ~VM_MAYWRITE;
++ return;
++ }
++ i++;
++ }
++ return;
++
++ case PT_GNU_RELRO:
++ if (!is_relro)
++ continue;
++ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
++ vma->vm_flags &= ~VM_MAYWRITE;
++ return;
++ }
++ }
++}
++#endif
++
+ static int __init init_elf_binfmt(void)
+ {
+ return register_binfmt(&elf_format);
+diff -urNp linux-2.6.31.7/fs/binfmt_flat.c linux-2.6.31.7/fs/binfmt_flat.c
+--- linux-2.6.31.7/fs/binfmt_flat.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/binfmt_flat.c 2009-12-08 17:39:44.043665858 -0500
+@@ -565,7 +565,9 @@ static int load_flat_file(struct linux_b
+ realdatastart = (unsigned long) -ENOMEM;
+ printk("Unable to allocate RAM for process data, errno %d\n",
+ (int)-realdatastart);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len);
++ up_write(&current->mm->mmap_sem);
+ ret = realdatastart;
+ goto err;
+ }
+@@ -589,8 +591,10 @@ static int load_flat_file(struct linux_b
+ }
+ if (result >= (unsigned long)-4096) {
+ printk("Unable to read data+bss, errno %d\n", (int)-result);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len);
+ do_munmap(current->mm, realdatastart, data_len + extra);
++ up_write(&current->mm->mmap_sem);
+ ret = result;
+ goto err;
+ }
+@@ -659,8 +663,10 @@ static int load_flat_file(struct linux_b
+ }
+ if (result >= (unsigned long)-4096) {
+ printk("Unable to read code+data+bss, errno %d\n",(int)-result);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len + data_len + extra +
+ MAX_SHARED_LIBS * sizeof(unsigned long));
++ up_write(&current->mm->mmap_sem);
+ ret = result;
+ goto err;
+ }
+diff -urNp linux-2.6.31.7/fs/binfmt_misc.c linux-2.6.31.7/fs/binfmt_misc.c
+--- linux-2.6.31.7/fs/binfmt_misc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/binfmt_misc.c 2009-12-08 17:39:44.044654416 -0500
+@@ -693,7 +693,7 @@ static int bm_fill_super(struct super_bl
+ static struct tree_descr bm_files[] = {
+ [2] = {"status", &bm_status_operations, S_IWUSR|S_IRUGO},
+ [3] = {"register", &bm_register_operations, S_IWUSR},
+- /* last one */ {""}
++ /* last one */ {"", NULL, 0}
+ };
+ int err = simple_fill_super(sb, 0x42494e4d, bm_files);
+ if (!err)
+diff -urNp linux-2.6.31.7/fs/bio.c linux-2.6.31.7/fs/bio.c
+--- linux-2.6.31.7/fs/bio.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/bio.c 2009-12-08 17:39:44.049792727 -0500
+@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
+
+ i = 0;
+ while (i < bio_slab_nr) {
+- struct bio_slab *bslab = &bio_slabs[i];
++ bslab = &bio_slabs[i];
+
+ if (!bslab->slab && entry == -1)
+ entry = i;
+@@ -1196,7 +1196,7 @@ static void bio_copy_kern_endio(struct b
+ const int read = bio_data_dir(bio) == READ;
+ struct bio_map_data *bmd = bio->bi_private;
+ int i;
+- char *p = bmd->sgvecs[0].iov_base;
++ char *p = (__force char *)bmd->sgvecs[0].iov_base;
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ char *addr = page_address(bvec->bv_page);
+diff -urNp linux-2.6.31.7/fs/btrfs/ctree.c linux-2.6.31.7/fs/btrfs/ctree.c
+--- linux-2.6.31.7/fs/btrfs/ctree.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/btrfs/ctree.c 2009-12-08 17:39:44.059528722 -0500
+@@ -3562,7 +3562,6 @@ setup_items_for_insert(struct btrfs_tran
+
+ ret = 0;
+ if (slot == 0) {
+- struct btrfs_disk_key disk_key;
+ btrfs_cpu_key_to_disk(&disk_key, cpu_key);
+ ret = fixup_low_keys(trans, root, path, &disk_key, 1);
+ }
+diff -urNp linux-2.6.31.7/fs/btrfs/ctree.h linux-2.6.31.7/fs/btrfs/ctree.h
+--- linux-2.6.31.7/fs/btrfs/ctree.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/btrfs/ctree.h 2009-12-08 17:39:44.060786332 -0500
+@@ -2286,7 +2286,7 @@ int btrfs_sync_file(struct file *file, s
+ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
+ int skip_pinned);
+ int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
+-extern struct file_operations btrfs_file_operations;
++extern const struct file_operations btrfs_file_operations;
+ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode,
+ u64 start, u64 end, u64 locked_end,
+diff -urNp linux-2.6.31.7/fs/btrfs/disk-io.c linux-2.6.31.7/fs/btrfs/disk-io.c
+--- linux-2.6.31.7/fs/btrfs/disk-io.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/btrfs/disk-io.c 2009-12-08 17:39:44.068765750 -0500
+@@ -772,7 +772,7 @@ static void btree_invalidatepage(struct
+ }
+ }
+
+-static struct address_space_operations btree_aops = {
++static const struct address_space_operations btree_aops = {
+ .readpage = btree_readpage,
+ .writepage = btree_writepage,
+ .writepages = btree_writepages,
+diff -urNp linux-2.6.31.7/fs/btrfs/file.c linux-2.6.31.7/fs/btrfs/file.c
+--- linux-2.6.31.7/fs/btrfs/file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/btrfs/file.c 2009-12-08 17:39:44.068765750 -0500
+@@ -1203,7 +1203,7 @@ out:
+ return ret > 0 ? EIO : ret;
+ }
+
+-static struct vm_operations_struct btrfs_file_vm_ops = {
++static const struct vm_operations_struct btrfs_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = btrfs_page_mkwrite,
+ };
+@@ -1215,7 +1215,7 @@ static int btrfs_file_mmap(struct file *
+ return 0;
+ }
+
+-struct file_operations btrfs_file_operations = {
++const struct file_operations btrfs_file_operations = {
+ .llseek = generic_file_llseek,
+ .read = do_sync_read,
+ .aio_read = generic_file_aio_read,
+diff -urNp linux-2.6.31.7/fs/btrfs/free-space-cache.c linux-2.6.31.7/fs/btrfs/free-space-cache.c
+--- linux-2.6.31.7/fs/btrfs/free-space-cache.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/btrfs/free-space-cache.c 2009-12-08 17:39:44.075930025 -0500
+@@ -1066,8 +1066,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
+
+ while(1) {
+ if (entry->bytes < bytes || entry->offset < min_start) {
+- struct rb_node *node;
+-
+ node = rb_next(&entry->offset_index);
+ if (!node)
+ break;
+@@ -1218,7 +1216,7 @@ again:
+ */
+ while (entry->bitmap || found_bitmap ||
+ (!entry->bitmap && entry->bytes < min_bytes)) {
+- struct rb_node *node = rb_next(&entry->offset_index);
++ node = rb_next(&entry->offset_index);
+
+ if (entry->bitmap && entry->bytes > bytes + empty_size) {
+ ret = btrfs_bitmap_cluster(block_group, entry, cluster,
+diff -urNp linux-2.6.31.7/fs/btrfs/inode.c linux-2.6.31.7/fs/btrfs/inode.c
+--- linux-2.6.31.7/fs/btrfs/inode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/btrfs/inode.c 2009-12-08 17:39:44.085642502 -0500
+@@ -55,14 +55,14 @@ struct btrfs_iget_args {
+ struct btrfs_root *root;
+ };
+
+-static struct inode_operations btrfs_dir_inode_operations;
+-static struct inode_operations btrfs_symlink_inode_operations;
+-static struct inode_operations btrfs_dir_ro_inode_operations;
+-static struct inode_operations btrfs_special_inode_operations;
+-static struct inode_operations btrfs_file_inode_operations;
+-static struct address_space_operations btrfs_aops;
+-static struct address_space_operations btrfs_symlink_aops;
+-static struct file_operations btrfs_dir_file_operations;
++static const struct inode_operations btrfs_dir_inode_operations;
++static const struct inode_operations btrfs_symlink_inode_operations;
++static const struct inode_operations btrfs_dir_ro_inode_operations;
++static const struct inode_operations btrfs_special_inode_operations;
++static const struct inode_operations btrfs_file_inode_operations;
++static const struct address_space_operations btrfs_aops;
++static const struct address_space_operations btrfs_symlink_aops;
++static const struct file_operations btrfs_dir_file_operations;
+ static struct extent_io_ops btrfs_extent_io_ops;
+
+ static struct kmem_cache *btrfs_inode_cachep;
+@@ -5201,7 +5201,7 @@ static int btrfs_permission(struct inode
+ return generic_permission(inode, mask, btrfs_check_acl);
+ }
+
+-static struct inode_operations btrfs_dir_inode_operations = {
++static const struct inode_operations btrfs_dir_inode_operations = {
+ .getattr = btrfs_getattr,
+ .lookup = btrfs_lookup,
+ .create = btrfs_create,
+@@ -5219,11 +5219,11 @@ static struct inode_operations btrfs_dir
+ .removexattr = btrfs_removexattr,
+ .permission = btrfs_permission,
+ };
+-static struct inode_operations btrfs_dir_ro_inode_operations = {
++static const struct inode_operations btrfs_dir_ro_inode_operations = {
+ .lookup = btrfs_lookup,
+ .permission = btrfs_permission,
+ };
+-static struct file_operations btrfs_dir_file_operations = {
++static const struct file_operations btrfs_dir_file_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .readdir = btrfs_real_readdir,
+@@ -5259,7 +5259,7 @@ static struct extent_io_ops btrfs_extent
+ *
+ * For now we're avoiding this by dropping bmap.
+ */
+-static struct address_space_operations btrfs_aops = {
++static const struct address_space_operations btrfs_aops = {
+ .readpage = btrfs_readpage,
+ .writepage = btrfs_writepage,
+ .writepages = btrfs_writepages,
+@@ -5271,14 +5271,14 @@ static struct address_space_operations b
+ .set_page_dirty = btrfs_set_page_dirty,
+ };
+
+-static struct address_space_operations btrfs_symlink_aops = {
++static const struct address_space_operations btrfs_symlink_aops = {
+ .readpage = btrfs_readpage,
+ .writepage = btrfs_writepage,
+ .invalidatepage = btrfs_invalidatepage,
+ .releasepage = btrfs_releasepage,
+ };
+
+-static struct inode_operations btrfs_file_inode_operations = {
++static const struct inode_operations btrfs_file_inode_operations = {
+ .truncate = btrfs_truncate,
+ .getattr = btrfs_getattr,
+ .setattr = btrfs_setattr,
+@@ -5290,7 +5290,7 @@ static struct inode_operations btrfs_fil
+ .fallocate = btrfs_fallocate,
+ .fiemap = btrfs_fiemap,
+ };
+-static struct inode_operations btrfs_special_inode_operations = {
++static const struct inode_operations btrfs_special_inode_operations = {
+ .getattr = btrfs_getattr,
+ .setattr = btrfs_setattr,
+ .permission = btrfs_permission,
+@@ -5299,7 +5299,7 @@ static struct inode_operations btrfs_spe
+ .listxattr = btrfs_listxattr,
+ .removexattr = btrfs_removexattr,
+ };
+-static struct inode_operations btrfs_symlink_inode_operations = {
++static const struct inode_operations btrfs_symlink_inode_operations = {
+ .readlink = generic_readlink,
+ .follow_link = page_follow_link_light,
+ .put_link = page_put_link,
+diff -urNp linux-2.6.31.7/fs/btrfs/super.c linux-2.6.31.7/fs/btrfs/super.c
+--- linux-2.6.31.7/fs/btrfs/super.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/btrfs/super.c 2009-12-08 17:39:44.085642502 -0500
+@@ -51,7 +51,7 @@
+ #include "export.h"
+ #include "compression.h"
+
+-static struct super_operations btrfs_super_ops;
++static const struct super_operations btrfs_super_ops;
+
+ static void btrfs_put_super(struct super_block *sb)
+ {
+@@ -675,7 +675,7 @@ static int btrfs_unfreeze(struct super_b
+ return 0;
+ }
+
+-static struct super_operations btrfs_super_ops = {
++static const struct super_operations btrfs_super_ops = {
+ .delete_inode = btrfs_delete_inode,
+ .put_super = btrfs_put_super,
+ .sync_fs = btrfs_sync_fs,
+diff -urNp linux-2.6.31.7/fs/btrfs/sysfs.c linux-2.6.31.7/fs/btrfs/sysfs.c
+--- linux-2.6.31.7/fs/btrfs/sysfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/btrfs/sysfs.c 2009-12-08 17:39:44.086752622 -0500
+@@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
+ complete(&root->kobj_unregister);
+ }
+
+-static struct sysfs_ops btrfs_super_attr_ops = {
++static const struct sysfs_ops btrfs_super_attr_ops = {
+ .show = btrfs_super_attr_show,
+ .store = btrfs_super_attr_store,
+ };
+
+-static struct sysfs_ops btrfs_root_attr_ops = {
++static const struct sysfs_ops btrfs_root_attr_ops = {
+ .show = btrfs_root_attr_show,
+ .store = btrfs_root_attr_store,
+ };
+diff -urNp linux-2.6.31.7/fs/buffer.c linux-2.6.31.7/fs/buffer.c
+--- linux-2.6.31.7/fs/buffer.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/buffer.c 2009-12-08 17:39:44.111793527 -0500
+@@ -25,6 +25,7 @@
+ #include <linux/percpu.h>
+ #include <linux/slab.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/blkdev.h>
+ #include <linux/file.h>
+ #include <linux/quotaops.h>
+@@ -2233,6 +2234,7 @@ int generic_cont_expand_simple(struct in
+
+ err = -EFBIG;
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
++ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long) size, 1);
+ if (limit != RLIM_INFINITY && size > (loff_t)limit) {
+ send_sig(SIGXFSZ, current, 0);
+ goto out;
+diff -urNp linux-2.6.31.7/fs/cachefiles/rdwr.c linux-2.6.31.7/fs/cachefiles/rdwr.c
+--- linux-2.6.31.7/fs/cachefiles/rdwr.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/cachefiles/rdwr.c 2009-12-08 17:39:44.111793527 -0500
+@@ -839,7 +839,7 @@ int cachefiles_write_page(struct fscache
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = file->f_op->write(
+- file, (const void __user *) data, PAGE_SIZE,
++ file, (__force const void __user *) data, PAGE_SIZE,
+ &pos);
+ set_fs(old_fs);
+ kunmap(page);
+diff -urNp linux-2.6.31.7/fs/cifs/cifs_dfs_ref.c linux-2.6.31.7/fs/cifs/cifs_dfs_ref.c
+--- linux-2.6.31.7/fs/cifs/cifs_dfs_ref.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/cifs/cifs_dfs_ref.c 2009-12-08 17:39:44.111793527 -0500
+@@ -385,7 +385,7 @@ out_err:
+ goto out;
+ }
+
+-struct inode_operations cifs_dfs_referral_inode_operations = {
++const struct inode_operations cifs_dfs_referral_inode_operations = {
+ .follow_link = cifs_dfs_follow_mountpoint,
+ };
+
+diff -urNp linux-2.6.31.7/fs/cifs/cifsfs.h linux-2.6.31.7/fs/cifs/cifsfs.h
+--- linux-2.6.31.7/fs/cifs/cifsfs.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/cifs/cifsfs.h 2009-12-08 17:39:44.112796385 -0500
+@@ -67,7 +67,7 @@ extern int cifs_setattr(struct dentry *,
+
+ extern const struct inode_operations cifs_file_inode_ops;
+ extern const struct inode_operations cifs_symlink_inode_ops;
+-extern struct inode_operations cifs_dfs_referral_inode_operations;
++extern const struct inode_operations cifs_dfs_referral_inode_operations;
+
+
+ /* Functions related to files and directories */
+diff -urNp linux-2.6.31.7/fs/cifs/cifs_uniupr.h linux-2.6.31.7/fs/cifs/cifs_uniupr.h
+--- linux-2.6.31.7/fs/cifs/cifs_uniupr.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/cifs/cifs_uniupr.h 2009-12-08 17:39:44.112796385 -0500
+@@ -132,7 +132,7 @@ const struct UniCaseRange CifsUniUpperRa
+ {0x0490, 0x04cc, UniCaseRangeU0490},
+ {0x1e00, 0x1ffc, UniCaseRangeU1e00},
+ {0xff40, 0xff5a, UniCaseRangeUff40},
+- {0}
++ {0, 0, NULL}
+ };
+ #endif
+
+diff -urNp linux-2.6.31.7/fs/cifs/link.c linux-2.6.31.7/fs/cifs/link.c
+--- linux-2.6.31.7/fs/cifs/link.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/cifs/link.c 2009-12-08 17:39:44.112796385 -0500
+@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
+
+ void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
+ {
+- char *p = nd_get_link(nd);
++ const char *p = nd_get_link(nd);
+ if (!IS_ERR(p))
+ kfree(p);
+ }
+diff -urNp linux-2.6.31.7/fs/compat_binfmt_elf.c linux-2.6.31.7/fs/compat_binfmt_elf.c
+--- linux-2.6.31.7/fs/compat_binfmt_elf.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/compat_binfmt_elf.c 2009-12-08 17:39:44.113776144 -0500
+@@ -29,10 +29,12 @@
+ #undef elfhdr
+ #undef elf_phdr
+ #undef elf_note
++#undef elf_dyn
+ #undef elf_addr_t
+ #define elfhdr elf32_hdr
+ #define elf_phdr elf32_phdr
+ #define elf_note elf32_note
++#define elf_dyn Elf32_Dyn
+ #define elf_addr_t Elf32_Addr
+
+ /*
+diff -urNp linux-2.6.31.7/fs/compat.c linux-2.6.31.7/fs/compat.c
+--- linux-2.6.31.7/fs/compat.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/compat.c 2009-12-08 17:39:44.113776144 -0500
+@@ -1417,14 +1417,12 @@ static int compat_copy_strings(int argc,
+ if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
+ struct page *page;
+
+-#ifdef CONFIG_STACK_GROWSUP
+ ret = expand_stack_downwards(bprm->vma, pos);
+ if (ret < 0) {
+ /* We've exceed the stack rlimit. */
+ ret = -E2BIG;
+ goto out;
+ }
+-#endif
+ ret = get_user_pages(current, bprm->mm, pos,
+ 1, 1, 1, &page, NULL);
+ if (ret <= 0) {
+@@ -1470,6 +1468,11 @@ int compat_do_execve(char * filename,
+ compat_uptr_t __user *envp,
+ struct pt_regs * regs)
+ {
++#ifdef CONFIG_GRKERNSEC
++ struct file *old_exec_file;
++ struct acl_subject_label *old_acl;
++ struct rlimit old_rlim[RLIM_NLIMITS];
++#endif
+ struct linux_binprm *bprm;
+ struct file *file;
+ struct files_struct *displaced;
+@@ -1506,6 +1509,14 @@ int compat_do_execve(char * filename,
+ bprm->filename = filename;
+ bprm->interp = filename;
+
++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
++ retval = -EAGAIN;
++ if (gr_handle_nproc())
++ goto out_file;
++ retval = -EACCES;
++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
++ goto out_file;
++
+ retval = bprm_mm_init(bprm);
+ if (retval)
+ goto out_file;
+@@ -1535,9 +1546,40 @@ int compat_do_execve(char * filename,
+ if (retval < 0)
+ goto out;
+
++ if (!gr_tpe_allow(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_check_crash_exec(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
++
++ gr_handle_exec_args(bprm, (char __user * __user *)argv);
++
++#ifdef CONFIG_GRKERNSEC
++ old_acl = current->acl;
++ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
++ old_exec_file = current->exec_file;
++ get_file(file);
++ current->exec_file = file;
++#endif
++
++ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
++ bprm->unsafe & LSM_UNSAFE_SHARE);
++ if (retval < 0)
++ goto out_fail;
++
+ retval = search_binary_handler(bprm, regs);
+ if (retval < 0)
+- goto out;
++ goto out_fail;
++#ifdef CONFIG_GRKERNSEC
++ if (old_exec_file)
++ fput(old_exec_file);
++#endif
+
+ /* execve succeeded */
+ current->fs->in_exec = 0;
+@@ -1548,6 +1590,14 @@ int compat_do_execve(char * filename,
+ put_files_struct(displaced);
+ return retval;
+
++out_fail:
++#ifdef CONFIG_GRKERNSEC
++ current->acl = old_acl;
++ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
++ fput(current->exec_file);
++ current->exec_file = old_exec_file;
++#endif
++
+ out:
+ if (bprm->mm)
+ mmput(bprm->mm);
+diff -urNp linux-2.6.31.7/fs/compat_ioctl.c linux-2.6.31.7/fs/compat_ioctl.c
+--- linux-2.6.31.7/fs/compat_ioctl.c 2009-12-08 17:29:51.631743554 -0500
++++ linux-2.6.31.7/fs/compat_ioctl.c 2009-12-08 17:39:44.114797657 -0500
+@@ -1827,15 +1827,15 @@ struct ioctl_trans {
+ };
+
+ #define HANDLE_IOCTL(cmd,handler) \
+- { (cmd), (ioctl_trans_handler_t)(handler) },
++ { (cmd), (ioctl_trans_handler_t)(handler), NULL },
+
+ /* pointer to compatible structure or no argument */
+ #define COMPATIBLE_IOCTL(cmd) \
+- { (cmd), do_ioctl32_pointer },
++ { (cmd), do_ioctl32_pointer, NULL },
+
+ /* argument is an unsigned long integer, not a pointer */
+ #define ULONG_IOCTL(cmd) \
+- { (cmd), (ioctl_trans_handler_t)sys_ioctl },
++ { (cmd), (ioctl_trans_handler_t)sys_ioctl, NULL },
+
+ /* ioctl should not be warned about even if it's not implemented.
+ Valid reasons to use this:
+diff -urNp linux-2.6.31.7/fs/debugfs/inode.c linux-2.6.31.7/fs/debugfs/inode.c
+--- linux-2.6.31.7/fs/debugfs/inode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/debugfs/inode.c 2009-12-08 17:39:44.123366802 -0500
+@@ -118,7 +118,7 @@ static inline int debugfs_positive(struc
+
+ static int debug_fill_super(struct super_block *sb, void *data, int silent)
+ {
+- static struct tree_descr debug_files[] = {{""}};
++ static struct tree_descr debug_files[] = {{"", NULL, 0}};
+
+ return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
+ }
+diff -urNp linux-2.6.31.7/fs/dlm/debug_fs.c linux-2.6.31.7/fs/dlm/debug_fs.c
+--- linux-2.6.31.7/fs/dlm/debug_fs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/dlm/debug_fs.c 2009-12-08 17:39:44.133683458 -0500
+@@ -386,9 +386,9 @@ static int table_seq_show(struct seq_fil
+ return rv;
+ }
+
+-static struct seq_operations format1_seq_ops;
+-static struct seq_operations format2_seq_ops;
+-static struct seq_operations format3_seq_ops;
++static const struct seq_operations format1_seq_ops;
++static const struct seq_operations format2_seq_ops;
++static const struct seq_operations format3_seq_ops;
+
+ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
+ {
+@@ -534,21 +534,21 @@ static void table_seq_stop(struct seq_fi
+ }
+ }
+
+-static struct seq_operations format1_seq_ops = {
++static const struct seq_operations format1_seq_ops = {
+ .start = table_seq_start,
+ .next = table_seq_next,
+ .stop = table_seq_stop,
+ .show = table_seq_show,
+ };
+
+-static struct seq_operations format2_seq_ops = {
++static const struct seq_operations format2_seq_ops = {
+ .start = table_seq_start,
+ .next = table_seq_next,
+ .stop = table_seq_stop,
+ .show = table_seq_show,
+ };
+
+-static struct seq_operations format3_seq_ops = {
++static const struct seq_operations format3_seq_ops = {
+ .start = table_seq_start,
+ .next = table_seq_next,
+ .stop = table_seq_stop,
+diff -urNp linux-2.6.31.7/fs/dlm/lockspace.c linux-2.6.31.7/fs/dlm/lockspace.c
+--- linux-2.6.31.7/fs/dlm/lockspace.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/dlm/lockspace.c 2009-12-08 17:39:44.133683458 -0500
+@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
+ kfree(ls);
+ }
+
+-static struct sysfs_ops dlm_attr_ops = {
++static const struct sysfs_ops dlm_attr_ops = {
+ .show = dlm_attr_show,
+ .store = dlm_attr_store,
+ };
+diff -urNp linux-2.6.31.7/fs/ecryptfs/ecryptfs_kernel.h linux-2.6.31.7/fs/ecryptfs/ecryptfs_kernel.h
+--- linux-2.6.31.7/fs/ecryptfs/ecryptfs_kernel.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ecryptfs/ecryptfs_kernel.h 2009-12-08 17:39:44.133683458 -0500
+@@ -582,7 +582,7 @@ extern const struct inode_operations ecr
+ extern const struct inode_operations ecryptfs_symlink_iops;
+ extern const struct super_operations ecryptfs_sops;
+ extern const struct dentry_operations ecryptfs_dops;
+-extern struct address_space_operations ecryptfs_aops;
++extern const struct address_space_operations ecryptfs_aops;
+ extern int ecryptfs_verbosity;
+ extern unsigned int ecryptfs_message_buf_len;
+ extern signed long ecryptfs_message_wait_timeout;
+diff -urNp linux-2.6.31.7/fs/ecryptfs/inode.c linux-2.6.31.7/fs/ecryptfs/inode.c
+--- linux-2.6.31.7/fs/ecryptfs/inode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ecryptfs/inode.c 2009-12-08 17:39:44.134794483 -0500
+@@ -676,7 +676,7 @@ ecryptfs_readlink(struct dentry *dentry,
+ old_fs = get_fs();
+ set_fs(get_ds());
+ rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
+- (char __user *)lower_buf,
++ (__force char __user *)lower_buf,
+ lower_bufsiz);
+ set_fs(old_fs);
+ if (rc >= 0) {
+@@ -720,7 +720,7 @@ static void *ecryptfs_follow_link(struct
+ }
+ old_fs = get_fs();
+ set_fs(get_ds());
+- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
++ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
+ set_fs(old_fs);
+ if (rc < 0)
+ goto out_free;
+diff -urNp linux-2.6.31.7/fs/ecryptfs/mmap.c linux-2.6.31.7/fs/ecryptfs/mmap.c
+--- linux-2.6.31.7/fs/ecryptfs/mmap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ecryptfs/mmap.c 2009-12-08 17:39:44.134794483 -0500
+@@ -545,7 +545,7 @@ static sector_t ecryptfs_bmap(struct add
+ return rc;
+ }
+
+-struct address_space_operations ecryptfs_aops = {
++const struct address_space_operations ecryptfs_aops = {
+ .writepage = ecryptfs_writepage,
+ .readpage = ecryptfs_readpage,
+ .write_begin = ecryptfs_write_begin,
+diff -urNp linux-2.6.31.7/fs/exec.c linux-2.6.31.7/fs/exec.c
+--- linux-2.6.31.7/fs/exec.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/exec.c 2009-12-08 17:39:44.135699953 -0500
+@@ -55,12 +55,24 @@
+ #include <linux/kmod.h>
+ #include <linux/fsnotify.h>
+ #include <linux/fs_struct.h>
++#include <linux/random.h>
++#include <linux/seq_file.h>
++
++#ifdef CONFIG_PAX_REFCOUNT
++#include <linux/kallsyms.h>
++#include <linux/kdebug.h>
++#endif
+
+ #include <asm/uaccess.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
+ #include "internal.h"
+
++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
++EXPORT_SYMBOL(pax_set_initial_flags_func);
++#endif
++
+ int core_uses_pid;
+ char core_pattern[CORENAME_MAX_SIZE] = "core";
+ int suid_dumpable = 0;
+@@ -113,7 +125,7 @@ SYSCALL_DEFINE1(uselib, const char __use
+ goto out;
+
+ file = do_filp_open(AT_FDCWD, tmp,
+- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
++ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
+ MAY_READ | MAY_EXEC | MAY_OPEN);
+ putname(tmp);
+ error = PTR_ERR(file);
+@@ -161,18 +173,10 @@ static struct page *get_arg_page(struct
+ int write)
+ {
+ struct page *page;
+- int ret;
+
+-#ifdef CONFIG_STACK_GROWSUP
+- if (write) {
+- ret = expand_stack_downwards(bprm->vma, pos);
+- if (ret < 0)
+- return NULL;
+- }
+-#endif
+- ret = get_user_pages(current, bprm->mm, pos,
+- 1, write, 1, &page, NULL);
+- if (ret <= 0)
++ if (0 > expand_stack_downwards(bprm->vma, pos))
++ return NULL;
++ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
+ return NULL;
+
+ if (write) {
+@@ -244,6 +248,11 @@ static int __bprm_mm_init(struct linux_b
+ vma->vm_end = STACK_TOP_MAX;
+ vma->vm_start = vma->vm_end - PAGE_SIZE;
+ vma->vm_flags = VM_STACK_FLAGS;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ err = insert_vm_struct(mm, vma);
+ if (err)
+@@ -252,6 +261,12 @@ static int __bprm_mm_init(struct linux_b
+ mm->stack_vm = mm->total_vm = 1;
+ up_write(&mm->mmap_sem);
+ bprm->p = vma->vm_end - sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++ if (randomize_va_space)
++ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
++#endif
++
+ return 0;
+ err:
+ up_write(&mm->mmap_sem);
+@@ -473,7 +488,7 @@ int copy_strings_kernel(int argc,char **
+ int r;
+ mm_segment_t oldfs = get_fs();
+ set_fs(KERNEL_DS);
+- r = copy_strings(argc, (char __user * __user *)argv, bprm);
++ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
+ set_fs(oldfs);
+ return r;
+ }
+@@ -503,7 +518,8 @@ static int shift_arg_pages(struct vm_are
+ unsigned long new_end = old_end - shift;
+ struct mmu_gather *tlb;
+
+- BUG_ON(new_start > new_end);
++ if (new_start >= new_end || new_start < mmap_min_addr)
++ return -EFAULT;
+
+ /*
+ * ensure there are no vmas between where we want to go
+@@ -512,6 +528,10 @@ static int shift_arg_pages(struct vm_are
+ if (vma != find_vma(mm, new_start))
+ return -EFAULT;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ BUG_ON(pax_find_mirror_vma(vma));
++#endif
++
+ /*
+ * cover the whole range: [new_start, old_end)
+ */
+@@ -600,6 +620,14 @@ int setup_arg_pages(struct linux_binprm
+ bprm->exec -= stack_shift;
+
+ down_write(&mm->mmap_sem);
++
++ /* Move stack pages down in memory. */
++ if (stack_shift) {
++ ret = shift_arg_pages(vma, stack_shift);
++ if (ret)
++ goto out_unlock;
++ }
++
+ vm_flags = VM_STACK_FLAGS;
+
+ /*
+@@ -613,21 +641,24 @@ int setup_arg_pages(struct linux_binprm
+ vm_flags &= ~VM_EXEC;
+ vm_flags |= mm->def_flags;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT)
++ vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
+ vm_flags);
+ if (ret)
+ goto out_unlock;
+ BUG_ON(prev != vma);
+
+- /* Move stack pages down in memory. */
+- if (stack_shift) {
+- ret = shift_arg_pages(vma, stack_shift);
+- if (ret) {
+- up_write(&mm->mmap_sem);
+- return ret;
+- }
+- }
+-
+ #ifdef CONFIG_STACK_GROWSUP
+ stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+ #else
+@@ -639,7 +670,7 @@ int setup_arg_pages(struct linux_binprm
+
+ out_unlock:
+ up_write(&mm->mmap_sem);
+- return 0;
++ return ret;
+ }
+ EXPORT_SYMBOL(setup_arg_pages);
+
+@@ -651,7 +682,7 @@ struct file *open_exec(const char *name)
+ int err;
+
+ file = do_filp_open(AT_FDCWD, name,
+- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
++ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
+ MAY_EXEC | MAY_OPEN);
+ if (IS_ERR(file))
+ goto out;
+@@ -688,7 +719,7 @@ int kernel_read(struct file *file, loff_
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- result = vfs_read(file, (void __user *)addr, count, &pos);
++ result = vfs_read(file, (__force void __user *)addr, count, &pos);
+ set_fs(old_fs);
+ return result;
+ }
+@@ -1085,7 +1116,7 @@ int check_unsafe_exec(struct linux_binpr
+ }
+ rcu_read_unlock();
+
+- if (p->fs->users > n_fs) {
++ if (atomic_read(&p->fs->users) > n_fs) {
+ bprm->unsafe |= LSM_UNSAFE_SHARE;
+ } else {
+ res = -EAGAIN;
+@@ -1284,6 +1315,11 @@ int do_execve(char * filename,
+ char __user *__user *envp,
+ struct pt_regs * regs)
+ {
++#ifdef CONFIG_GRKERNSEC
++ struct file *old_exec_file;
++ struct acl_subject_label *old_acl;
++ struct rlimit old_rlim[RLIM_NLIMITS];
++#endif
+ struct linux_binprm *bprm;
+ struct file *file;
+ struct files_struct *displaced;
+@@ -1320,6 +1356,18 @@ int do_execve(char * filename,
+ bprm->filename = filename;
+ bprm->interp = filename;
+
++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
++
++ if (gr_handle_nproc()) {
++ retval = -EAGAIN;
++ goto out_file;
++ }
++
++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
++ retval = -EACCES;
++ goto out_file;
++ }
++
+ retval = bprm_mm_init(bprm);
+ if (retval)
+ goto out_file;
+@@ -1349,10 +1397,41 @@ int do_execve(char * filename,
+ if (retval < 0)
+ goto out;
+
++ if (!gr_tpe_allow(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_check_crash_exec(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
++
++ gr_handle_exec_args(bprm, argv);
++
++#ifdef CONFIG_GRKERNSEC
++ old_acl = current->acl;
++ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
++ old_exec_file = current->exec_file;
++ get_file(file);
++ current->exec_file = file;
++#endif
++
++ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
++ bprm->unsafe & LSM_UNSAFE_SHARE);
++ if (retval < 0)
++ goto out_fail;
++
+ current->flags &= ~PF_KTHREAD;
+ retval = search_binary_handler(bprm,regs);
+ if (retval < 0)
+- goto out;
++ goto out_fail;
++#ifdef CONFIG_GRKERNSEC
++ if (old_exec_file)
++ fput(old_exec_file);
++#endif
+
+ /* execve succeeded */
+ current->fs->in_exec = 0;
+@@ -1363,6 +1442,14 @@ int do_execve(char * filename,
+ put_files_struct(displaced);
+ return retval;
+
++out_fail:
++#ifdef CONFIG_GRKERNSEC
++ current->acl = old_acl;
++ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
++ fput(current->exec_file);
++ current->exec_file = old_exec_file;
++#endif
++
+ out:
+ if (bprm->mm)
+ mmput (bprm->mm);
+@@ -1528,6 +1615,164 @@ out:
+ return ispipe;
+ }
+
++int pax_check_flags(unsigned long *flags)
++{
++ int retval = 0;
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
++ if (*flags & MF_PAX_SEGMEXEC)
++ {
++ *flags &= ~MF_PAX_SEGMEXEC;
++ retval = -EINVAL;
++ }
++#endif
++
++ if ((*flags & MF_PAX_PAGEEXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ && (*flags & MF_PAX_SEGMEXEC)
++#endif
++
++ )
++ {
++ *flags &= ~MF_PAX_PAGEEXEC;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & MF_PAX_MPROTECT)
++
++#ifdef CONFIG_PAX_MPROTECT
++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~MF_PAX_MPROTECT;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & MF_PAX_EMUTRAMP)
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~MF_PAX_EMUTRAMP;
++ retval = -EINVAL;
++ }
++
++ return retval;
++}
++
++EXPORT_SYMBOL(pax_check_flags);
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
++{
++ struct task_struct *tsk = current;
++ struct mm_struct *mm = current->mm;
++ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
++ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
++ char *path_exec = NULL;
++ char *path_fault = NULL;
++ unsigned long start = 0UL, end = 0UL, offset = 0UL;
++
++ if (buffer_exec && buffer_fault) {
++ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
++
++ down_read(&mm->mmap_sem);
++ vma = mm->mmap;
++ while (vma && (!vma_exec || !vma_fault)) {
++ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
++ vma_exec = vma;
++ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
++ vma_fault = vma;
++ vma = vma->vm_next;
++ }
++ if (vma_exec) {
++ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
++ if (IS_ERR(path_exec))
++ path_exec = "<path too long>";
++ else {
++ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
++ if (path_exec) {
++ *path_exec = 0;
++ path_exec = buffer_exec;
++ } else
++ path_exec = "<path too long>";
++ }
++ }
++ if (vma_fault) {
++ start = vma_fault->vm_start;
++ end = vma_fault->vm_end;
++ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
++ if (vma_fault->vm_file) {
++ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
++ if (IS_ERR(path_fault))
++ path_fault = "<path too long>";
++ else {
++ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
++ if (path_fault) {
++ *path_fault = 0;
++ path_fault = buffer_fault;
++ } else
++ path_fault = "<path too long>";
++ }
++ } else
++ path_fault = "<anonymous mapping>";
++ }
++ up_read(&mm->mmap_sem);
++ }
++ if (tsk->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %u.%u.%u.%u: execution attempt in: %s, %08lx-%08lx %08lx\n", NIPQUAD(tsk->signal->curr_ip), path_fault, start, end, offset);
++ else
++ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
++ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
++ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
++ task_uid(tsk), task_euid(tsk), pc, sp);
++ free_page((unsigned long)buffer_exec);
++ free_page((unsigned long)buffer_fault);
++ pax_report_insns(pc, sp);
++ do_coredump(SIGKILL, SIGKILL, regs);
++}
++#endif
++
++#ifdef CONFIG_PAX_REFCOUNT
++void pax_report_refcount_overflow(struct pt_regs *regs)
++{
++ if (current->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %u.%u.%u.%u: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
++ NIPQUAD(current->signal->curr_ip), current->comm, task_pid_nr(current), current_uid(), current_euid());
++ else
++ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
++ current->comm, task_pid_nr(current), current_uid(), current_euid());
++ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
++ show_regs(regs);
++ force_sig_specific(SIGKILL, current);
++}
++#endif
++
++#ifdef CONFIG_PAX_USERCOPY
++void pax_report_leak_to_user(const void *ptr, unsigned long len)
++{
++ if (current->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %u.%u.%u.%u: kernel memory leak attempt detected from %p (%lu bytes)\n", NIPQUAD(current->signal->curr_ip), ptr, len);
++ else
++ printk(KERN_ERR "PAX: kernel memory leak attempt detected from %p (%lu bytes)\n", ptr, len);
++ dump_stack();
++ do_group_exit(SIGKILL);
++}
++
++void pax_report_overflow_from_user(const void *ptr, unsigned long len)
++{
++ printk(KERN_ERR "PAX: kernel memory overflow attempt detected to %p (%lu bytes)\n", ptr, len);
++ dump_stack();
++ do_group_exit(SIGKILL);
++}
++#endif
++
+ static int zap_process(struct task_struct *start)
+ {
+ struct task_struct *t;
+@@ -1787,6 +2032,10 @@ void do_coredump(long signr, int exit_co
+ */
+ clear_thread_flag(TIF_SIGPENDING);
+
++ if (signr == SIGKILL || signr == SIGILL)
++ gr_handle_brute_attach(current);
++ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
++
+ /*
+ * lock_kernel() because format_corename() is controlled by sysctl, which
+ * uses lock_kernel()
+diff -urNp linux-2.6.31.7/fs/ext2/balloc.c linux-2.6.31.7/fs/ext2/balloc.c
+--- linux-2.6.31.7/fs/ext2/balloc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ext2/balloc.c 2009-12-08 17:39:44.135699953 -0500
+@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
+
+ free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+ root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
+- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
++ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
+ sbi->s_resuid != current_fsuid() &&
+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
+ return 0;
+diff -urNp linux-2.6.31.7/fs/ext3/balloc.c linux-2.6.31.7/fs/ext3/balloc.c
+--- linux-2.6.31.7/fs/ext3/balloc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ext3/balloc.c 2009-12-08 17:39:44.136793842 -0500
+@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
+
+ free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+ root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
+- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
++ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
+ sbi->s_resuid != current_fsuid() &&
+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
+ return 0;
+diff -urNp linux-2.6.31.7/fs/ext3/namei.c linux-2.6.31.7/fs/ext3/namei.c
+--- linux-2.6.31.7/fs/ext3/namei.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ext3/namei.c 2009-12-08 17:39:44.137793462 -0500
+@@ -1168,7 +1168,7 @@ static struct ext3_dir_entry_2 *do_split
+ char *data1 = (*bh)->b_data, *data2;
+ unsigned split, move, size;
+ struct ext3_dir_entry_2 *de = NULL, *de2;
+- int err = 0, i;
++ int i, err = 0;
+
+ bh2 = ext3_append (handle, dir, &newblock, &err);
+ if (!(bh2)) {
+diff -urNp linux-2.6.31.7/fs/ext3/xattr.c linux-2.6.31.7/fs/ext3/xattr.c
+--- linux-2.6.31.7/fs/ext3/xattr.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ext3/xattr.c 2009-12-08 17:39:44.137793462 -0500
+@@ -89,8 +89,8 @@
+ printk("\n"); \
+ } while (0)
+ #else
+-# define ea_idebug(f...)
+-# define ea_bdebug(f...)
++# define ea_idebug(f...) do {} while (0)
++# define ea_bdebug(f...) do {} while (0)
+ #endif
+
+ static void ext3_xattr_cache_insert(struct buffer_head *);
+diff -urNp linux-2.6.31.7/fs/ext4/balloc.c linux-2.6.31.7/fs/ext4/balloc.c
+--- linux-2.6.31.7/fs/ext4/balloc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ext4/balloc.c 2009-12-08 17:39:44.137793462 -0500
+@@ -573,7 +573,7 @@ int ext4_has_free_blocks(struct ext4_sb_
+ /* Hm, nope. Are (enough) root reserved blocks available? */
+ if (sbi->s_resuid == current_fsuid() ||
+ ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
+- capable(CAP_SYS_RESOURCE)) {
++ capable_nolog(CAP_SYS_RESOURCE)) {
+ if (free_blocks >= (nblocks + dirty_blocks))
+ return 1;
+ }
+diff -urNp linux-2.6.31.7/fs/ext4/file.c linux-2.6.31.7/fs/ext4/file.c
+--- linux-2.6.31.7/fs/ext4/file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ext4/file.c 2009-12-08 17:39:44.138793510 -0500
+@@ -130,7 +130,7 @@ force_commit:
+ return ret;
+ }
+
+-static struct vm_operations_struct ext4_file_vm_ops = {
++static const struct vm_operations_struct ext4_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = ext4_page_mkwrite,
+ };
+diff -urNp linux-2.6.31.7/fs/ext4/mballoc.c linux-2.6.31.7/fs/ext4/mballoc.c
+--- linux-2.6.31.7/fs/ext4/mballoc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ext4/mballoc.c 2009-12-08 17:39:44.139792655 -0500
+@@ -2205,7 +2205,7 @@ static void ext4_mb_seq_history_stop(str
+ {
+ }
+
+-static struct seq_operations ext4_mb_seq_history_ops = {
++static const struct seq_operations ext4_mb_seq_history_ops = {
+ .start = ext4_mb_seq_history_start,
+ .next = ext4_mb_seq_history_next,
+ .stop = ext4_mb_seq_history_stop,
+@@ -2287,7 +2287,7 @@ static ssize_t ext4_mb_seq_history_write
+ return count;
+ }
+
+-static struct file_operations ext4_mb_seq_history_fops = {
++static const struct file_operations ext4_mb_seq_history_fops = {
+ .owner = THIS_MODULE,
+ .open = ext4_mb_seq_history_open,
+ .read = seq_read,
+@@ -2366,7 +2366,7 @@ static void ext4_mb_seq_groups_stop(stru
+ {
+ }
+
+-static struct seq_operations ext4_mb_seq_groups_ops = {
++static const struct seq_operations ext4_mb_seq_groups_ops = {
+ .start = ext4_mb_seq_groups_start,
+ .next = ext4_mb_seq_groups_next,
+ .stop = ext4_mb_seq_groups_stop,
+@@ -2387,7 +2387,7 @@ static int ext4_mb_seq_groups_open(struc
+
+ }
+
+-static struct file_operations ext4_mb_seq_groups_fops = {
++static const struct file_operations ext4_mb_seq_groups_fops = {
+ .owner = THIS_MODULE,
+ .open = ext4_mb_seq_groups_open,
+ .read = seq_read,
+diff -urNp linux-2.6.31.7/fs/ext4/namei.c linux-2.6.31.7/fs/ext4/namei.c
+--- linux-2.6.31.7/fs/ext4/namei.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ext4/namei.c 2009-12-08 17:39:44.139792655 -0500
+@@ -1203,7 +1203,7 @@ static struct ext4_dir_entry_2 *do_split
+ char *data1 = (*bh)->b_data, *data2;
+ unsigned split, move, size;
+ struct ext4_dir_entry_2 *de = NULL, *de2;
+- int err = 0, i;
++ int i, err = 0;
+
+ bh2 = ext4_append (handle, dir, &newblock, &err);
+ if (!(bh2)) {
+diff -urNp linux-2.6.31.7/fs/ext4/super.c linux-2.6.31.7/fs/ext4/super.c
+--- linux-2.6.31.7/fs/ext4/super.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ext4/super.c 2009-12-08 17:39:44.140793824 -0500
+@@ -2242,7 +2242,7 @@ static void ext4_sb_release(struct kobje
+ }
+
+
+-static struct sysfs_ops ext4_attr_ops = {
++static const struct sysfs_ops ext4_attr_ops = {
+ .show = ext4_attr_show,
+ .store = ext4_attr_store,
+ };
+diff -urNp linux-2.6.31.7/fs/fcntl.c linux-2.6.31.7/fs/fcntl.c
+--- linux-2.6.31.7/fs/fcntl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/fcntl.c 2009-12-08 17:39:44.141793581 -0500
+@@ -271,6 +271,7 @@ static long do_fcntl(int fd, unsigned in
+ switch (cmd) {
+ case F_DUPFD:
+ case F_DUPFD_CLOEXEC:
++ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
+ if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
+ break;
+ err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
+@@ -421,7 +422,8 @@ static inline int sigio_perm(struct task
+ ret = ((fown->euid == 0 ||
+ fown->euid == cred->suid || fown->euid == cred->uid ||
+ fown->uid == cred->suid || fown->uid == cred->uid) &&
+- !security_file_send_sigiotask(p, fown, sig));
++ !security_file_send_sigiotask(p, fown, sig) &&
++ !gr_check_protected_task(p) && !gr_pid_is_chrooted(p));
+ rcu_read_unlock();
+ return ret;
+ }
+diff -urNp linux-2.6.31.7/fs/fifo.c linux-2.6.31.7/fs/fifo.c
+--- linux-2.6.31.7/fs/fifo.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/fifo.c 2009-12-08 17:39:44.141793581 -0500
+@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
+ */
+ filp->f_op = &read_pipefifo_fops;
+ pipe->r_counter++;
+- if (pipe->readers++ == 0)
++ if (atomic_inc_return(&pipe->readers) == 1)
+ wake_up_partner(inode);
+
+- if (!pipe->writers) {
++ if (!atomic_read(&pipe->writers)) {
+ if ((filp->f_flags & O_NONBLOCK)) {
+ /* suppress POLLHUP until we have
+ * seen a writer */
+@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
+ * errno=ENXIO when there is no process reading the FIFO.
+ */
+ ret = -ENXIO;
+- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
++ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
+ goto err;
+
+ filp->f_op = &write_pipefifo_fops;
+ pipe->w_counter++;
+- if (!pipe->writers++)
++ if (atomic_inc_return(&pipe->writers) == 1)
+ wake_up_partner(inode);
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ wait_for_partner(inode, &pipe->r_counter);
+ if (signal_pending(current))
+ goto err_wr;
+@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
+ */
+ filp->f_op = &rdwr_pipefifo_fops;
+
+- pipe->readers++;
+- pipe->writers++;
++ atomic_inc(&pipe->readers);
++ atomic_inc(&pipe->writers);
+ pipe->r_counter++;
+ pipe->w_counter++;
+- if (pipe->readers == 1 || pipe->writers == 1)
++ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
+ wake_up_partner(inode);
+ break;
+
+@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
+ return 0;
+
+ err_rd:
+- if (!--pipe->readers)
++ if (atomic_dec_and_test(&pipe->readers))
+ wake_up_interruptible(&pipe->wait);
+ ret = -ERESTARTSYS;
+ goto err;
+
+ err_wr:
+- if (!--pipe->writers)
++ if (atomic_dec_and_test(&pipe->writers))
+ wake_up_interruptible(&pipe->wait);
+ ret = -ERESTARTSYS;
+ goto err;
+
+ err:
+- if (!pipe->readers && !pipe->writers)
++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
+ free_pipe_info(inode);
+
+ err_nocleanup:
+diff -urNp linux-2.6.31.7/fs/file.c linux-2.6.31.7/fs/file.c
+--- linux-2.6.31.7/fs/file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/file.c 2009-12-08 17:39:44.141793581 -0500
+@@ -13,6 +13,7 @@
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/file.h>
++#include <linux/security.h>
+ #include <linux/fdtable.h>
+ #include <linux/bitops.h>
+ #include <linux/interrupt.h>
+@@ -256,6 +257,8 @@ int expand_files(struct files_struct *fi
+ * N.B. For clone tasks sharing a files structure, this test
+ * will limit the total number of files that can be opened.
+ */
++
++ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
+ if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
+ return -EMFILE;
+
+diff -urNp linux-2.6.31.7/fs/fs_struct.c linux-2.6.31.7/fs/fs_struct.c
+--- linux-2.6.31.7/fs/fs_struct.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/fs_struct.c 2009-12-08 17:39:44.141793581 -0500
+@@ -89,7 +89,7 @@ void exit_fs(struct task_struct *tsk)
+ task_lock(tsk);
+ write_lock(&fs->lock);
+ tsk->fs = NULL;
+- kill = !--fs->users;
++ kill = !atomic_dec_return(&fs->users);
+ write_unlock(&fs->lock);
+ task_unlock(tsk);
+ if (kill)
+@@ -102,7 +102,7 @@ struct fs_struct *copy_fs_struct(struct
+ struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
+ /* We don't need to lock fs - think why ;-) */
+ if (fs) {
+- fs->users = 1;
++ atomic_set(&fs->users, 1);
+ fs->in_exec = 0;
+ rwlock_init(&fs->lock);
+ fs->umask = old->umask;
+@@ -127,7 +127,7 @@ int unshare_fs_struct(void)
+
+ task_lock(current);
+ write_lock(&fs->lock);
+- kill = !--fs->users;
++ kill = !atomic_dec_return(&fs->users);
+ current->fs = new_fs;
+ write_unlock(&fs->lock);
+ task_unlock(current);
+@@ -147,7 +147,7 @@ EXPORT_SYMBOL(current_umask);
+
+ /* to be mentioned only in INIT_TASK */
+ struct fs_struct init_fs = {
+- .users = 1,
++ .users = ATOMIC_INIT(1),
+ .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
+ .umask = 0022,
+ };
+@@ -162,12 +162,12 @@ void daemonize_fs_struct(void)
+ task_lock(current);
+
+ write_lock(&init_fs.lock);
+- init_fs.users++;
++ atomic_inc(&init_fs.users);
+ write_unlock(&init_fs.lock);
+
+ write_lock(&fs->lock);
+ current->fs = &init_fs;
+- kill = !--fs->users;
++ kill = !atomic_dec_return(&fs->users);
+ write_unlock(&fs->lock);
+
+ task_unlock(current);
+diff -urNp linux-2.6.31.7/fs/fuse/control.c linux-2.6.31.7/fs/fuse/control.c
+--- linux-2.6.31.7/fs/fuse/control.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/fuse/control.c 2009-12-08 17:39:44.142793433 -0500
+@@ -161,7 +161,7 @@ void fuse_ctl_remove_conn(struct fuse_co
+
+ static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent)
+ {
+- struct tree_descr empty_descr = {""};
++ struct tree_descr empty_descr = {"", NULL, 0};
+ struct fuse_conn *fc;
+ int err;
+
+diff -urNp linux-2.6.31.7/fs/fuse/dev.c linux-2.6.31.7/fs/fuse/dev.c
+--- linux-2.6.31.7/fs/fuse/dev.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/fuse/dev.c 2009-12-08 17:39:44.142793433 -0500
+@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
+ {
+ struct fuse_notify_inval_entry_out outarg;
+ int err = -EINVAL;
+- char buf[FUSE_NAME_MAX+1];
++ char *buf = NULL;
+ struct qstr name;
+
+ if (size < sizeof(outarg))
+@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
+ if (outarg.namelen > FUSE_NAME_MAX)
+ goto err;
+
++ err = -ENOMEM;
++ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
++ if (!buf)
++ goto err;
++
+ name.name = buf;
+ name.len = outarg.namelen;
+ err = fuse_copy_one(cs, buf, outarg.namelen + 1);
+@@ -910,17 +915,15 @@ static int fuse_notify_inval_entry(struc
+
+ down_read(&fc->killsb);
+ err = -ENOENT;
+- if (!fc->sb)
+- goto err_unlock;
+-
+- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
+-
+-err_unlock:
++ if (fc->sb)
++ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
+ up_read(&fc->killsb);
++ kfree(buf);
+ return err;
+
+ err:
+ fuse_copy_finish(cs);
++ kfree(buf);
+ return err;
+ }
+
+diff -urNp linux-2.6.31.7/fs/fuse/dir.c linux-2.6.31.7/fs/fuse/dir.c
+--- linux-2.6.31.7/fs/fuse/dir.c 2009-12-08 17:29:51.632738455 -0500
++++ linux-2.6.31.7/fs/fuse/dir.c 2009-12-08 17:39:44.142793433 -0500
+@@ -1125,7 +1125,7 @@ static char *read_link(struct dentry *de
+ return link;
+ }
+
+-static void free_link(char *link)
++static void free_link(const char *link)
+ {
+ if (!IS_ERR(link))
+ free_page((unsigned long) link);
+diff -urNp linux-2.6.31.7/fs/fuse/file.c linux-2.6.31.7/fs/fuse/file.c
+--- linux-2.6.31.7/fs/fuse/file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/fuse/file.c 2009-12-08 17:39:44.143801259 -0500
+@@ -1314,7 +1314,7 @@ static int fuse_page_mkwrite(struct vm_a
+ return 0;
+ }
+
+-static struct vm_operations_struct fuse_file_vm_ops = {
++static const struct vm_operations_struct fuse_file_vm_ops = {
+ .close = fuse_vma_close,
+ .fault = filemap_fault,
+ .page_mkwrite = fuse_page_mkwrite,
+diff -urNp linux-2.6.31.7/fs/gfs2/file.c linux-2.6.31.7/fs/gfs2/file.c
+--- linux-2.6.31.7/fs/gfs2/file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/gfs2/file.c 2009-12-08 17:39:44.143801259 -0500
+@@ -419,7 +419,7 @@ out:
+ return ret;
+ }
+
+-static struct vm_operations_struct gfs2_vm_ops = {
++static const struct vm_operations_struct gfs2_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = gfs2_page_mkwrite,
+ };
+diff -urNp linux-2.6.31.7/fs/gfs2/sys.c linux-2.6.31.7/fs/gfs2/sys.c
+--- linux-2.6.31.7/fs/gfs2/sys.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/gfs2/sys.c 2009-12-08 17:39:44.144790616 -0500
+@@ -48,7 +48,7 @@ static ssize_t gfs2_attr_store(struct ko
+ return a->store ? a->store(sdp, buf, len) : len;
+ }
+
+-static struct sysfs_ops gfs2_attr_ops = {
++static const struct sysfs_ops gfs2_attr_ops = {
+ .show = gfs2_attr_show,
+ .store = gfs2_attr_store,
+ };
+@@ -574,7 +574,7 @@ static int gfs2_uevent(struct kset *kset
+ return 0;
+ }
+
+-static struct kset_uevent_ops gfs2_uevent_ops = {
++static const struct kset_uevent_ops gfs2_uevent_ops = {
+ .uevent = gfs2_uevent,
+ };
+
+diff -urNp linux-2.6.31.7/fs/hfs/inode.c linux-2.6.31.7/fs/hfs/inode.c
+--- linux-2.6.31.7/fs/hfs/inode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/hfs/inode.c 2009-12-08 17:39:44.144790616 -0500
+@@ -423,7 +423,7 @@ int hfs_write_inode(struct inode *inode,
+
+ if (S_ISDIR(main_inode->i_mode)) {
+ if (fd.entrylength < sizeof(struct hfs_cat_dir))
+- /* panic? */;
++ {/* panic? */}
+ hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+ sizeof(struct hfs_cat_dir));
+ if (rec.type != HFS_CDR_DIR ||
+@@ -444,7 +444,7 @@ int hfs_write_inode(struct inode *inode,
+ sizeof(struct hfs_cat_file));
+ } else {
+ if (fd.entrylength < sizeof(struct hfs_cat_file))
+- /* panic? */;
++ {/* panic? */}
+ hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+ sizeof(struct hfs_cat_file));
+ if (rec.type != HFS_CDR_FIL ||
+diff -urNp linux-2.6.31.7/fs/hfsplus/inode.c linux-2.6.31.7/fs/hfsplus/inode.c
+--- linux-2.6.31.7/fs/hfsplus/inode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/hfsplus/inode.c 2009-12-08 17:39:44.144790616 -0500
+@@ -406,7 +406,7 @@ int hfsplus_cat_read_inode(struct inode
+ struct hfsplus_cat_folder *folder = &entry.folder;
+
+ if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
+- /* panic? */;
++ {/* panic? */}
+ hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
+ sizeof(struct hfsplus_cat_folder));
+ hfsplus_get_perms(inode, &folder->permissions, 1);
+@@ -423,7 +423,7 @@ int hfsplus_cat_read_inode(struct inode
+ struct hfsplus_cat_file *file = &entry.file;
+
+ if (fd->entrylength < sizeof(struct hfsplus_cat_file))
+- /* panic? */;
++ {/* panic? */}
+ hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
+ sizeof(struct hfsplus_cat_file));
+
+@@ -479,7 +479,7 @@ int hfsplus_cat_write_inode(struct inode
+ struct hfsplus_cat_folder *folder = &entry.folder;
+
+ if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
+- /* panic? */;
++ {/* panic? */}
+ hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+ sizeof(struct hfsplus_cat_folder));
+ /* simple node checks? */
+@@ -501,7 +501,7 @@ int hfsplus_cat_write_inode(struct inode
+ struct hfsplus_cat_file *file = &entry.file;
+
+ if (fd.entrylength < sizeof(struct hfsplus_cat_file))
+- /* panic? */;
++ {/* panic? */}
+ hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+ sizeof(struct hfsplus_cat_file));
+ hfsplus_inode_write_fork(inode, &file->data_fork);
+diff -urNp linux-2.6.31.7/fs/ioctl.c linux-2.6.31.7/fs/ioctl.c
+--- linux-2.6.31.7/fs/ioctl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ioctl.c 2009-12-08 17:39:44.145794467 -0500
+@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
+ u64 phys, u64 len, u32 flags)
+ {
+ struct fiemap_extent extent;
+- struct fiemap_extent *dest = fieinfo->fi_extents_start;
++ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
+
+ /* only count the extents */
+ if (fieinfo->fi_extents_max == 0) {
+@@ -206,7 +206,7 @@ static int ioctl_fiemap(struct file *fil
+
+ fieinfo.fi_flags = fiemap.fm_flags;
+ fieinfo.fi_extents_max = fiemap.fm_extent_count;
+- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
++ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
+
+ if (fiemap.fm_extent_count != 0 &&
+ !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
+@@ -219,7 +219,7 @@ static int ioctl_fiemap(struct file *fil
+ error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
+ fiemap.fm_flags = fieinfo.fi_flags;
+ fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
+- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
++ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
+ error = -EFAULT;
+
+ return error;
+diff -urNp linux-2.6.31.7/fs/jbd2/journal.c linux-2.6.31.7/fs/jbd2/journal.c
+--- linux-2.6.31.7/fs/jbd2/journal.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/jbd2/journal.c 2009-12-08 17:39:44.145794467 -0500
+@@ -768,7 +768,7 @@ static void jbd2_seq_history_stop(struct
+ {
+ }
+
+-static struct seq_operations jbd2_seq_history_ops = {
++static const struct seq_operations jbd2_seq_history_ops = {
+ .start = jbd2_seq_history_start,
+ .next = jbd2_seq_history_next,
+ .stop = jbd2_seq_history_stop,
+@@ -818,7 +818,7 @@ static int jbd2_seq_history_release(stru
+ return seq_release(inode, file);
+ }
+
+-static struct file_operations jbd2_seq_history_fops = {
++static const struct file_operations jbd2_seq_history_fops = {
+ .owner = THIS_MODULE,
+ .open = jbd2_seq_history_open,
+ .read = seq_read,
+@@ -872,7 +872,7 @@ static void jbd2_seq_info_stop(struct se
+ {
+ }
+
+-static struct seq_operations jbd2_seq_info_ops = {
++static const struct seq_operations jbd2_seq_info_ops = {
+ .start = jbd2_seq_info_start,
+ .next = jbd2_seq_info_next,
+ .stop = jbd2_seq_info_stop,
+@@ -920,7 +920,7 @@ static int jbd2_seq_info_release(struct
+ return seq_release(inode, file);
+ }
+
+-static struct file_operations jbd2_seq_info_fops = {
++static const struct file_operations jbd2_seq_info_fops = {
+ .owner = THIS_MODULE,
+ .open = jbd2_seq_info_open,
+ .read = seq_read,
+diff -urNp linux-2.6.31.7/fs/jffs2/debug.h linux-2.6.31.7/fs/jffs2/debug.h
+--- linux-2.6.31.7/fs/jffs2/debug.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/jffs2/debug.h 2009-12-08 17:39:44.146795175 -0500
+@@ -52,13 +52,13 @@
+ #if CONFIG_JFFS2_FS_DEBUG > 0
+ #define D1(x) x
+ #else
+-#define D1(x)
++#define D1(x) do {} while (0);
+ #endif
+
+ #if CONFIG_JFFS2_FS_DEBUG > 1
+ #define D2(x) x
+ #else
+-#define D2(x)
++#define D2(x) do {} while (0);
+ #endif
+
+ /* The prefixes of JFFS2 messages */
+@@ -114,73 +114,73 @@
+ #ifdef JFFS2_DBG_READINODE_MESSAGES
+ #define dbg_readinode(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+ #else
+-#define dbg_readinode(fmt, ...)
++#define dbg_readinode(fmt, ...) do {} while (0)
+ #endif
+ #ifdef JFFS2_DBG_READINODE2_MESSAGES
+ #define dbg_readinode2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+ #else
+-#define dbg_readinode2(fmt, ...)
++#define dbg_readinode2(fmt, ...) do {} while (0)
+ #endif
+
+ /* Fragtree build debugging messages */
+ #ifdef JFFS2_DBG_FRAGTREE_MESSAGES
+ #define dbg_fragtree(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+ #else
+-#define dbg_fragtree(fmt, ...)
++#define dbg_fragtree(fmt, ...) do {} while (0)
+ #endif
+ #ifdef JFFS2_DBG_FRAGTREE2_MESSAGES
+ #define dbg_fragtree2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+ #else
+-#define dbg_fragtree2(fmt, ...)
++#define dbg_fragtree2(fmt, ...) do {} while (0)
+ #endif
+
+ /* Directory entry list manilulation debugging messages */
+ #ifdef JFFS2_DBG_DENTLIST_MESSAGES
+ #define dbg_dentlist(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+ #else
+-#define dbg_dentlist(fmt, ...)
++#define dbg_dentlist(fmt, ...) do {} while (0)
+ #endif
+
+ /* Print the messages about manipulating node_refs */
+ #ifdef JFFS2_DBG_NODEREF_MESSAGES
+ #define dbg_noderef(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+ #else
+-#define dbg_noderef(fmt, ...)
++#define dbg_noderef(fmt, ...) do {} while (0)
+ #endif
+
+ /* Manipulations with the list of inodes (JFFS2 inocache) */
+ #ifdef JFFS2_DBG_INOCACHE_MESSAGES
+ #define dbg_inocache(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+ #else
+-#define dbg_inocache(fmt, ...)
++#define dbg_inocache(fmt, ...) do {} while (0)
+ #endif
+
+ /* Summary debugging messages */
+ #ifdef JFFS2_DBG_SUMMARY_MESSAGES
+ #define dbg_summary(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+ #else
+-#define dbg_summary(fmt, ...)
++#define dbg_summary(fmt, ...) do {} while (0)
+ #endif
+
+ /* File system build messages */
+ #ifdef JFFS2_DBG_FSBUILD_MESSAGES
+ #define dbg_fsbuild(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+ #else
+-#define dbg_fsbuild(fmt, ...)
++#define dbg_fsbuild(fmt, ...) do {} while (0)
+ #endif
+
+ /* Watch the object allocations */
+ #ifdef JFFS2_DBG_MEMALLOC_MESSAGES
+ #define dbg_memalloc(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+ #else
+-#define dbg_memalloc(fmt, ...)
++#define dbg_memalloc(fmt, ...) do {} while (0)
+ #endif
+
+ /* Watch the XATTR subsystem */
+ #ifdef JFFS2_DBG_XATTR_MESSAGES
+ #define dbg_xattr(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+ #else
+-#define dbg_xattr(fmt, ...)
++#define dbg_xattr(fmt, ...) do {} while (0)
+ #endif
+
+ /* "Sanity" checks */
+diff -urNp linux-2.6.31.7/fs/jffs2/erase.c linux-2.6.31.7/fs/jffs2/erase.c
+--- linux-2.6.31.7/fs/jffs2/erase.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/jffs2/erase.c 2009-12-08 17:39:44.146795175 -0500
+@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
+ struct jffs2_unknown_node marker = {
+ .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
+ .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+- .totlen = cpu_to_je32(c->cleanmarker_size)
++ .totlen = cpu_to_je32(c->cleanmarker_size),
++ .hdr_crc = cpu_to_je32(0)
+ };
+
+ jffs2_prealloc_raw_node_refs(c, jeb, 1);
+diff -urNp linux-2.6.31.7/fs/jffs2/summary.h linux-2.6.31.7/fs/jffs2/summary.h
+--- linux-2.6.31.7/fs/jffs2/summary.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/jffs2/summary.h 2009-12-08 17:39:44.146795175 -0500
+@@ -194,18 +194,18 @@ int jffs2_sum_scan_sumnode(struct jffs2_
+
+ #define jffs2_sum_active() (0)
+ #define jffs2_sum_init(a) (0)
+-#define jffs2_sum_exit(a)
+-#define jffs2_sum_disable_collecting(a)
++#define jffs2_sum_exit(a) do {} while (0)
++#define jffs2_sum_disable_collecting(a) do {} while (0)
+ #define jffs2_sum_is_disabled(a) (0)
+-#define jffs2_sum_reset_collected(a)
++#define jffs2_sum_reset_collected(a) do {} while (0)
+ #define jffs2_sum_add_kvec(a,b,c,d) (0)
+-#define jffs2_sum_move_collected(a,b)
++#define jffs2_sum_move_collected(a,b) do {} while (0)
+ #define jffs2_sum_write_sumnode(a) (0)
+-#define jffs2_sum_add_padding_mem(a,b)
+-#define jffs2_sum_add_inode_mem(a,b,c)
+-#define jffs2_sum_add_dirent_mem(a,b,c)
+-#define jffs2_sum_add_xattr_mem(a,b,c)
+-#define jffs2_sum_add_xref_mem(a,b,c)
++#define jffs2_sum_add_padding_mem(a,b) do {} while (0)
++#define jffs2_sum_add_inode_mem(a,b,c) do {} while (0)
++#define jffs2_sum_add_dirent_mem(a,b,c) do {} while (0)
++#define jffs2_sum_add_xattr_mem(a,b,c) do {} while (0)
++#define jffs2_sum_add_xref_mem(a,b,c) do {} while (0)
+ #define jffs2_sum_scan_sumnode(a,b,c,d,e) (0)
+
+ #endif /* CONFIG_JFFS2_SUMMARY */
+diff -urNp linux-2.6.31.7/fs/jffs2/wbuf.c linux-2.6.31.7/fs/jffs2/wbuf.c
+--- linux-2.6.31.7/fs/jffs2/wbuf.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/jffs2/wbuf.c 2009-12-08 17:39:44.147754199 -0500
+@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
+ {
+ .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
+ .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+- .totlen = constant_cpu_to_je32(8)
++ .totlen = constant_cpu_to_je32(8),
++ .hdr_crc = constant_cpu_to_je32(0)
+ };
+
+ /*
+diff -urNp linux-2.6.31.7/fs/lockd/clntproc.c linux-2.6.31.7/fs/lockd/clntproc.c
+--- linux-2.6.31.7/fs/lockd/clntproc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/lockd/clntproc.c 2009-12-08 17:39:44.147754199 -0500
+@@ -458,7 +458,7 @@ static void nlmclnt_locks_release_privat
+ nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
+ }
+
+-static struct file_lock_operations nlmclnt_lock_ops = {
++static const struct file_lock_operations nlmclnt_lock_ops = {
+ .fl_copy_lock = nlmclnt_locks_copy_lock,
+ .fl_release_private = nlmclnt_locks_release_private,
+ };
+diff -urNp linux-2.6.31.7/fs/lockd/svc.c linux-2.6.31.7/fs/lockd/svc.c
+--- linux-2.6.31.7/fs/lockd/svc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/lockd/svc.c 2009-12-08 17:39:44.148652807 -0500
+@@ -43,7 +43,7 @@
+
+ static struct svc_program nlmsvc_program;
+
+-struct nlmsvc_binding * nlmsvc_ops;
++const struct nlmsvc_binding * nlmsvc_ops;
+ EXPORT_SYMBOL_GPL(nlmsvc_ops);
+
+ static DEFINE_MUTEX(nlmsvc_mutex);
+diff -urNp linux-2.6.31.7/fs/lockd/svclock.c linux-2.6.31.7/fs/lockd/svclock.c
+--- linux-2.6.31.7/fs/lockd/svclock.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/lockd/svclock.c 2009-12-08 17:39:44.148652807 -0500
+@@ -705,7 +705,7 @@ static int nlmsvc_same_owner(struct file
+ return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
+ }
+
+-struct lock_manager_operations nlmsvc_lock_operations = {
++const struct lock_manager_operations nlmsvc_lock_operations = {
+ .fl_compare_owner = nlmsvc_same_owner,
+ .fl_notify = nlmsvc_notify_blocked,
+ .fl_grant = nlmsvc_grant_deferred,
+diff -urNp linux-2.6.31.7/fs/locks.c linux-2.6.31.7/fs/locks.c
+--- linux-2.6.31.7/fs/locks.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/locks.c 2009-12-08 17:39:44.148652807 -0500
+@@ -434,7 +434,7 @@ static int lease_mylease_callback(struct
+ return fl->fl_file == try->fl_file;
+ }
+
+-static struct lock_manager_operations lease_manager_ops = {
++static const struct lock_manager_operations lease_manager_ops = {
+ .fl_break = lease_break_callback,
+ .fl_release_private = lease_release_private_callback,
+ .fl_mylease = lease_mylease_callback,
+@@ -2007,16 +2007,16 @@ void locks_remove_flock(struct file *fil
+ return;
+
+ if (filp->f_op && filp->f_op->flock) {
+- struct file_lock fl = {
++ struct file_lock flock = {
+ .fl_pid = current->tgid,
+ .fl_file = filp,
+ .fl_flags = FL_FLOCK,
+ .fl_type = F_UNLCK,
+ .fl_end = OFFSET_MAX,
+ };
+- filp->f_op->flock(filp, F_SETLKW, &fl);
+- if (fl.fl_ops && fl.fl_ops->fl_release_private)
+- fl.fl_ops->fl_release_private(&fl);
++ filp->f_op->flock(filp, F_SETLKW, &flock);
++ if (flock.fl_ops && flock.fl_ops->fl_release_private)
++ flock.fl_ops->fl_release_private(&flock);
+ }
+
+ lock_kernel();
+diff -urNp linux-2.6.31.7/fs/namei.c linux-2.6.31.7/fs/namei.c
+--- linux-2.6.31.7/fs/namei.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/namei.c 2009-12-08 17:39:44.149794654 -0500
+@@ -631,7 +631,7 @@ static __always_inline int __do_follow_l
+ cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
+ error = PTR_ERR(cookie);
+ if (!IS_ERR(cookie)) {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+ error = 0;
+ if (s)
+ error = __vfs_follow_link(nd, s);
+@@ -662,6 +662,13 @@ static inline int do_follow_link(struct
+ err = security_inode_follow_link(path->dentry, nd);
+ if (err)
+ goto loop;
++
++ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
++ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
++ err = -EACCES;
++ goto loop;
++ }
++
+ current->link_count++;
+ current->total_link_count++;
+ nd->depth++;
+@@ -1005,11 +1012,18 @@ return_reval:
+ break;
+ }
+ return_base:
++ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
++ path_put(&nd->path);
++ return -ENOENT;
++ }
+ return 0;
+ out_dput:
+ path_put_conditional(&next, nd);
+ break;
+ }
++ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
++ err = -ENOENT;
++
+ path_put(&nd->path);
+ return_err:
+ return err;
+@@ -1610,12 +1624,19 @@ static int __open_namei_create(struct na
+ int error;
+ struct dentry *dir = nd->path.dentry;
+
++ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
++ error = -EACCES;
++ goto out_unlock;
++ }
++
+ if (!IS_POSIXACL(dir->d_inode))
+ mode &= ~current_umask();
+ error = security_path_mknod(&nd->path, path->dentry, mode, 0);
+ if (error)
+ goto out_unlock;
+ error = vfs_create(dir->d_inode, path->dentry, mode, nd);
++ if (!error)
++ gr_handle_create(path->dentry, nd->path.mnt);
+ out_unlock:
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(nd->path.dentry);
+@@ -1698,6 +1719,22 @@ struct file *do_filp_open(int dfd, const
+ &nd, flag);
+ if (error)
+ return ERR_PTR(error);
++
++ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
++ error = -EPERM;
++ goto exit;
++ }
++
++ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
++ error = -EPERM;
++ goto exit;
++ }
++
++ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
++ error = -EACCES;
++ goto exit;
++ }
++
+ goto ok;
+ }
+
+@@ -1784,6 +1821,24 @@ do_last:
+ /*
+ * It already exists.
+ */
++
++ if (gr_handle_rofs_blockwrite(path.dentry, nd.path.mnt, acc_mode)) {
++ error = -EPERM;
++ goto exit_mutex_unlock;
++ }
++ if (gr_handle_rawio(path.dentry->d_inode)) {
++ error = -EPERM;
++ goto exit_mutex_unlock;
++ }
++ if (!gr_acl_handle_open(path.dentry, nd.path.mnt, flag)) {
++ error = -EACCES;
++ goto exit_mutex_unlock;
++ }
++ if (gr_handle_fifo(path.dentry, nd.path.mnt, dir, flag, acc_mode)) {
++ error = -EACCES;
++ goto exit_mutex_unlock;
++ }
++
+ mutex_unlock(&dir->d_inode->i_mutex);
+ audit_inode(pathname, path.dentry);
+
+@@ -1876,6 +1931,13 @@ do_link:
+ error = security_inode_follow_link(path.dentry, &nd);
+ if (error)
+ goto exit_dput;
++
++ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
++ path.dentry, nd.path.mnt)) {
++ error = -EACCES;
++ goto exit_dput;
++ }
++
+ error = __do_follow_link(&path, &nd);
+ if (error) {
+ /* Does someone understand code flow here? Or it is only
+@@ -2050,6 +2112,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
+ error = may_mknod(mode);
+ if (error)
+ goto out_dput;
++
++ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
++ error = -EPERM;
++ goto out_dput;
++ }
++
++ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
++ error = -EACCES;
++ goto out_dput;
++ }
++
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto out_dput;
+@@ -2070,6 +2143,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
+ }
+ out_drop_write:
+ mnt_drop_write(nd.path.mnt);
++
++ if (!error)
++ gr_handle_create(dentry, nd.path.mnt);
+ out_dput:
+ dput(dentry);
+ out_unlock:
+@@ -2123,6 +2199,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
+ if (IS_ERR(dentry))
+ goto out_unlock;
+
++ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
++ error = -EACCES;
++ goto out_dput;
++ }
++
+ if (!IS_POSIXACL(nd.path.dentry->d_inode))
+ mode &= ~current_umask();
+ error = mnt_want_write(nd.path.mnt);
+@@ -2134,6 +2215,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
+ error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
+ out_drop_write:
+ mnt_drop_write(nd.path.mnt);
++
++ if (!error)
++ gr_handle_create(dentry, nd.path.mnt);
++
+ out_dput:
+ dput(dentry);
+ out_unlock:
+@@ -2215,6 +2300,8 @@ static long do_rmdir(int dfd, const char
+ char * name;
+ struct dentry *dentry;
+ struct nameidata nd;
++ ino_t saved_ino = 0;
++ dev_t saved_dev = 0;
+
+ error = user_path_parent(dfd, pathname, &nd, &name);
+ if (error)
+@@ -2239,6 +2326,19 @@ static long do_rmdir(int dfd, const char
+ error = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
+ goto exit2;
++
++ if (dentry->d_inode != NULL) {
++ if (dentry->d_inode->i_nlink <= 1) {
++ saved_ino = dentry->d_inode->i_ino;
++ saved_dev = dentry->d_inode->i_sb->s_dev;
++ }
++
++ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
++ error = -EACCES;
++ goto exit3;
++ }
++ }
++
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto exit3;
+@@ -2246,6 +2346,8 @@ static long do_rmdir(int dfd, const char
+ if (error)
+ goto exit4;
+ error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
++ if (!error && (saved_dev || saved_ino))
++ gr_handle_delete(saved_ino, saved_dev);
+ exit4:
+ mnt_drop_write(nd.path.mnt);
+ exit3:
+@@ -2307,6 +2409,8 @@ static long do_unlinkat(int dfd, const c
+ struct dentry *dentry;
+ struct nameidata nd;
+ struct inode *inode = NULL;
++ ino_t saved_ino = 0;
++ dev_t saved_dev = 0;
+
+ error = user_path_parent(dfd, pathname, &nd, &name);
+ if (error)
+@@ -2326,8 +2430,19 @@ static long do_unlinkat(int dfd, const c
+ if (nd.last.name[nd.last.len])
+ goto slashes;
+ inode = dentry->d_inode;
+- if (inode)
++ if (inode) {
++ if (inode->i_nlink <= 1) {
++ saved_ino = inode->i_ino;
++ saved_dev = inode->i_sb->s_dev;
++ }
++
+ atomic_inc(&inode->i_count);
++
++ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
++ error = -EACCES;
++ goto exit2;
++ }
++ }
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto exit2;
+@@ -2335,6 +2450,8 @@ static long do_unlinkat(int dfd, const c
+ if (error)
+ goto exit3;
+ error = vfs_unlink(nd.path.dentry->d_inode, dentry);
++ if (!error && (saved_ino || saved_dev))
++ gr_handle_delete(saved_ino, saved_dev);
+ exit3:
+ mnt_drop_write(nd.path.mnt);
+ exit2:
+@@ -2413,6 +2530,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
+ if (IS_ERR(dentry))
+ goto out_unlock;
+
++ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
++ error = -EACCES;
++ goto out_dput;
++ }
++
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto out_dput;
+@@ -2420,6 +2542,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
+ if (error)
+ goto out_drop_write;
+ error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
++ if (!error)
++ gr_handle_create(dentry, nd.path.mnt);
+ out_drop_write:
+ mnt_drop_write(nd.path.mnt);
+ out_dput:
+@@ -2513,6 +2637,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
+ error = PTR_ERR(new_dentry);
+ if (IS_ERR(new_dentry))
+ goto out_unlock;
++
++ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
++ old_path.dentry->d_inode,
++ old_path.dentry->d_inode->i_mode, to)) {
++ error = -EACCES;
++ goto out_dput;
++ }
++
++ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
++ old_path.dentry, old_path.mnt, to)) {
++ error = -EACCES;
++ goto out_dput;
++ }
++
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto out_dput;
+@@ -2520,6 +2658,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
+ if (error)
+ goto out_drop_write;
+ error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
++ if (!error)
++ gr_handle_create(new_dentry, nd.path.mnt);
+ out_drop_write:
+ mnt_drop_write(nd.path.mnt);
+ out_dput:
+@@ -2753,6 +2893,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
+ if (new_dentry == trap)
+ goto exit5;
+
++ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
++ old_dentry, old_dir->d_inode, oldnd.path.mnt,
++ to);
++ if (error)
++ goto exit5;
++
+ error = mnt_want_write(oldnd.path.mnt);
+ if (error)
+ goto exit5;
+@@ -2762,6 +2908,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
+ goto exit6;
+ error = vfs_rename(old_dir->d_inode, old_dentry,
+ new_dir->d_inode, new_dentry);
++ if (!error)
++ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
++ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
+ exit6:
+ mnt_drop_write(oldnd.path.mnt);
+ exit5:
+diff -urNp linux-2.6.31.7/fs/namespace.c linux-2.6.31.7/fs/namespace.c
+--- linux-2.6.31.7/fs/namespace.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/namespace.c 2009-12-08 17:39:44.150795801 -0500
+@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
+ if (!(sb->s_flags & MS_RDONLY))
+ retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
+ up_write(&sb->s_umount);
++
++ gr_log_remount(mnt->mnt_devname, retval);
++
+ return retval;
+ }
+
+@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
+ security_sb_umount_busy(mnt);
+ up_write(&namespace_sem);
+ release_mounts(&umount_list);
++
++ gr_log_unmount(mnt->mnt_devname, retval);
++
+ return retval;
+ }
+
+@@ -1940,6 +1946,16 @@ long do_mount(char *dev_name, char *dir_
+ if (retval)
+ goto dput_out;
+
++ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
++ retval = -EPERM;
++ goto dput_out;
++ }
++
++ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
++ retval = -EPERM;
++ goto dput_out;
++ }
++
+ if (flags & MS_REMOUNT)
+ retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
+ data_page);
+@@ -1954,6 +1970,9 @@ long do_mount(char *dev_name, char *dir_
+ dev_name, data_page);
+ dput_out:
+ path_put(&path);
++
++ gr_log_mount(dev_name, dir_name, retval);
++
+ return retval;
+ }
+
+@@ -2158,6 +2177,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
+ goto out1;
+ }
+
++ if (gr_handle_chroot_pivot()) {
++ error = -EPERM;
++ path_put(&old);
++ goto out1;
++ }
++
+ read_lock(&current->fs->lock);
+ root = current->fs->root;
+ path_get(&current->fs->root);
+diff -urNp linux-2.6.31.7/fs/nfs/client.c linux-2.6.31.7/fs/nfs/client.c
+--- linux-2.6.31.7/fs/nfs/client.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nfs/client.c 2009-12-08 17:39:44.150795801 -0500
+@@ -1533,7 +1533,7 @@ static void *nfs_server_list_next(struct
+ static void nfs_server_list_stop(struct seq_file *p, void *v);
+ static int nfs_server_list_show(struct seq_file *m, void *v);
+
+-static struct seq_operations nfs_server_list_ops = {
++static const struct seq_operations nfs_server_list_ops = {
+ .start = nfs_server_list_start,
+ .next = nfs_server_list_next,
+ .stop = nfs_server_list_stop,
+@@ -1554,7 +1554,7 @@ static void *nfs_volume_list_next(struct
+ static void nfs_volume_list_stop(struct seq_file *p, void *v);
+ static int nfs_volume_list_show(struct seq_file *m, void *v);
+
+-static struct seq_operations nfs_volume_list_ops = {
++static const struct seq_operations nfs_volume_list_ops = {
+ .start = nfs_volume_list_start,
+ .next = nfs_volume_list_next,
+ .stop = nfs_volume_list_stop,
+diff -urNp linux-2.6.31.7/fs/nfs/file.c linux-2.6.31.7/fs/nfs/file.c
+--- linux-2.6.31.7/fs/nfs/file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nfs/file.c 2009-12-08 17:39:44.151796246 -0500
+@@ -59,7 +59,7 @@ static int nfs_lock(struct file *filp, i
+ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
+ static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
+
+-static struct vm_operations_struct nfs_file_vm_ops;
++static const struct vm_operations_struct nfs_file_vm_ops;
+
+ const struct file_operations nfs_file_operations = {
+ .llseek = nfs_file_llseek,
+@@ -526,7 +526,7 @@ out_unlock:
+ return VM_FAULT_SIGBUS;
+ }
+
+-static struct vm_operations_struct nfs_file_vm_ops = {
++static const struct vm_operations_struct nfs_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = nfs_vm_page_mkwrite,
+ };
+diff -urNp linux-2.6.31.7/fs/nfs/nfs4proc.c linux-2.6.31.7/fs/nfs/nfs4proc.c
+--- linux-2.6.31.7/fs/nfs/nfs4proc.c 2009-12-08 17:29:51.633741922 -0500
++++ linux-2.6.31.7/fs/nfs/nfs4proc.c 2009-12-08 17:39:44.165657713 -0500
+@@ -1128,7 +1128,7 @@ static int _nfs4_do_open_reclaim(struct
+ static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
+ {
+ struct nfs_server *server = NFS_SERVER(state->inode);
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = _nfs4_do_open_reclaim(ctx, state);
+@@ -1170,7 +1170,7 @@ static int _nfs4_open_delegation_recall(
+
+ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ int err;
+ do {
+@@ -1486,7 +1486,7 @@ static int _nfs4_open_expired(struct nfs
+ static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
+ {
+ struct nfs_server *server = NFS_SERVER(state->inode);
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+
+ do {
+@@ -1584,7 +1584,7 @@ out_err:
+
+ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ struct nfs4_state *res;
+ int status;
+
+@@ -1675,7 +1675,7 @@ static int nfs4_do_setattr(struct inode
+ struct nfs4_state *state)
+ {
+ struct nfs_server *server = NFS_SERVER(inode);
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(server,
+@@ -2019,7 +2019,7 @@ static int _nfs4_server_capabilities(str
+
+ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(server,
+@@ -2053,7 +2053,7 @@ static int _nfs4_lookup_root(struct nfs_
+ static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(server,
+@@ -2142,7 +2142,7 @@ static int _nfs4_proc_getattr(struct nfs
+
+ static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(server,
+@@ -2230,7 +2230,7 @@ static int nfs4_proc_lookupfh(struct nfs
+ struct qstr *name, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = _nfs4_proc_lookupfh(server, dirfh, name, fhandle, fattr);
+@@ -2259,7 +2259,7 @@ static int _nfs4_proc_lookup(struct inod
+
+ static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+@@ -2323,7 +2323,7 @@ static int _nfs4_proc_access(struct inod
+
+ static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(inode),
+@@ -2379,7 +2379,7 @@ static int _nfs4_proc_readlink(struct in
+ static int nfs4_proc_readlink(struct inode *inode, struct page *page,
+ unsigned int pgbase, unsigned int pglen)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(inode),
+@@ -2477,7 +2477,7 @@ static int _nfs4_proc_remove(struct inod
+
+ static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+@@ -2551,7 +2551,7 @@ static int _nfs4_proc_rename(struct inod
+ static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
+ struct inode *new_dir, struct qstr *new_name)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(old_dir),
+@@ -2598,7 +2598,7 @@ static int _nfs4_proc_link(struct inode
+
+ static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(inode),
+@@ -2690,7 +2690,7 @@ out:
+ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
+ struct page *page, unsigned int len, struct iattr *sattr)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+@@ -2721,7 +2721,7 @@ out:
+ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+ struct iattr *sattr)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+@@ -2770,7 +2770,7 @@ static int _nfs4_proc_readdir(struct den
+ static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+ u64 cookie, struct page *page, unsigned int count, int plus)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
+@@ -2818,7 +2818,7 @@ out:
+ static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
+ struct iattr *sattr, dev_t rdev)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+@@ -2850,7 +2850,7 @@ static int _nfs4_proc_statfs(struct nfs_
+
+ static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(server,
+@@ -2881,7 +2881,7 @@ static int _nfs4_do_fsinfo(struct nfs_se
+
+ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+
+ do {
+@@ -2927,7 +2927,7 @@ static int _nfs4_proc_pathconf(struct nf
+ static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_pathconf *pathconf)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+
+ do {
+@@ -3226,7 +3226,7 @@ out_free:
+
+ static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ ssize_t ret;
+ do {
+ ret = __nfs4_get_acl_uncached(inode, buf, buflen);
+@@ -3282,7 +3282,7 @@ static int __nfs4_proc_set_acl(struct in
+
+ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(inode),
+@@ -3547,7 +3547,7 @@ out:
+ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
+ {
+ struct nfs_server *server = NFS_SERVER(inode);
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+ do {
+ err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
+@@ -3620,7 +3620,7 @@ out:
+
+ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+
+ do {
+@@ -3994,7 +3994,7 @@ static int _nfs4_do_setlk(struct nfs4_st
+ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
+ {
+ struct nfs_server *server = NFS_SERVER(state->inode);
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+
+ do {
+@@ -4012,7 +4012,7 @@ static int nfs4_lock_reclaim(struct nfs4
+ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
+ {
+ struct nfs_server *server = NFS_SERVER(state->inode);
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+
+ err = nfs4_set_lock_state(state, request);
+@@ -4067,7 +4067,7 @@ out:
+
+ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+
+ do {
+@@ -4127,7 +4127,7 @@ nfs4_proc_lock(struct file *filp, int cm
+ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
+ {
+ struct nfs_server *server = NFS_SERVER(state->inode);
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {0, 0};
+ int err;
+
+ err = nfs4_set_lock_state(state, fl);
+diff -urNp linux-2.6.31.7/fs/nfs/nfs4state.c linux-2.6.31.7/fs/nfs/nfs4state.c
+--- linux-2.6.31.7/fs/nfs/nfs4state.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nfs/nfs4state.c 2009-12-08 17:39:44.165657713 -0500
+@@ -638,7 +638,7 @@ static void nfs4_fl_release_lock(struct
+ nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
+ }
+
+-static struct file_lock_operations nfs4_fl_lock_ops = {
++static const struct file_lock_operations nfs4_fl_lock_ops = {
+ .fl_copy_lock = nfs4_fl_copy_lock,
+ .fl_release_private = nfs4_fl_release_lock,
+ };
+diff -urNp linux-2.6.31.7/fs/nfsd/export.c linux-2.6.31.7/fs/nfsd/export.c
+--- linux-2.6.31.7/fs/nfsd/export.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nfsd/export.c 2009-12-08 17:39:44.166775951 -0500
+@@ -1505,7 +1505,7 @@ static int e_show(struct seq_file *m, vo
+ return svc_export_show(m, &svc_export_cache, cp);
+ }
+
+-struct seq_operations nfs_exports_op = {
++const struct seq_operations nfs_exports_op = {
+ .start = e_start,
+ .next = e_next,
+ .stop = e_stop,
+diff -urNp linux-2.6.31.7/fs/nfsd/lockd.c linux-2.6.31.7/fs/nfsd/lockd.c
+--- linux-2.6.31.7/fs/nfsd/lockd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nfsd/lockd.c 2009-12-08 17:39:44.166775951 -0500
+@@ -67,7 +67,7 @@ nlm_fclose(struct file *filp)
+ fput(filp);
+ }
+
+-static struct nlmsvc_binding nfsd_nlm_ops = {
++static const struct nlmsvc_binding nfsd_nlm_ops = {
+ .fopen = nlm_fopen, /* open file for locking */
+ .fclose = nlm_fclose, /* close file */
+ };
+diff -urNp linux-2.6.31.7/fs/nfsd/nfs4state.c linux-2.6.31.7/fs/nfsd/nfs4state.c
+--- linux-2.6.31.7/fs/nfsd/nfs4state.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nfsd/nfs4state.c 2009-12-08 17:39:44.167778465 -0500
+@@ -2163,7 +2163,7 @@ int nfsd_change_deleg_cb(struct file_loc
+ return -EAGAIN;
+ }
+
+-static struct lock_manager_operations nfsd_lease_mng_ops = {
++static const struct lock_manager_operations nfsd_lease_mng_ops = {
+ .fl_break = nfsd_break_deleg_cb,
+ .fl_release_private = nfsd_release_deleg_cb,
+ .fl_copy_lock = nfsd_copy_lock_deleg_cb,
+@@ -3368,7 +3368,7 @@ nfs4_transform_lock_offset(struct file_l
+
+ /* Hack!: For now, we're defining this just so we can use a pointer to it
+ * as a unique cookie to identify our (NFSv4's) posix locks. */
+-static struct lock_manager_operations nfsd_posix_mng_ops = {
++static const struct lock_manager_operations nfsd_posix_mng_ops = {
+ };
+
+ static inline void
+diff -urNp linux-2.6.31.7/fs/nfsd/nfsctl.c linux-2.6.31.7/fs/nfsd/nfsctl.c
+--- linux-2.6.31.7/fs/nfsd/nfsctl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nfsd/nfsctl.c 2009-12-08 17:39:44.180824061 -0500
+@@ -174,7 +174,7 @@ static const struct file_operations expo
+
+ extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
+
+-static struct file_operations pool_stats_operations = {
++static const struct file_operations pool_stats_operations = {
+ .open = nfsd_pool_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.31.7/fs/nfsd/vfs.c linux-2.6.31.7/fs/nfsd/vfs.c
+--- linux-2.6.31.7/fs/nfsd/vfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nfsd/vfs.c 2009-12-08 17:39:44.180824061 -0500
+@@ -930,7 +930,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
+ } else {
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
++ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
+ set_fs(oldfs);
+ }
+
+@@ -1053,7 +1053,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
+
+ /* Write the data. */
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
++ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
+ set_fs(oldfs);
+ if (host_err < 0)
+ goto out_nfserr;
+@@ -1528,7 +1528,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
+ */
+
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+- host_err = inode->i_op->readlink(dentry, buf, *lenp);
++ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
+ set_fs(oldfs);
+
+ if (host_err < 0)
+diff -urNp linux-2.6.31.7/fs/nilfs2/btnode.c linux-2.6.31.7/fs/nilfs2/btnode.c
+--- linux-2.6.31.7/fs/nilfs2/btnode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nilfs2/btnode.c 2009-12-08 17:39:44.181797872 -0500
+@@ -47,7 +47,7 @@ void nilfs_btnode_cache_init_once(struct
+ INIT_LIST_HEAD(&btnc->i_mmap_nonlinear);
+ }
+
+-static struct address_space_operations def_btnode_aops = {
++static const struct address_space_operations def_btnode_aops = {
+ .sync_page = block_sync_page,
+ };
+
+diff -urNp linux-2.6.31.7/fs/nilfs2/dir.c linux-2.6.31.7/fs/nilfs2/dir.c
+--- linux-2.6.31.7/fs/nilfs2/dir.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nilfs2/dir.c 2009-12-08 17:39:44.181797872 -0500
+@@ -697,7 +697,7 @@ not_empty:
+ return 0;
+ }
+
+-struct file_operations nilfs_dir_operations = {
++const struct file_operations nilfs_dir_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .readdir = nilfs_readdir,
+diff -urNp linux-2.6.31.7/fs/nilfs2/file.c linux-2.6.31.7/fs/nilfs2/file.c
+--- linux-2.6.31.7/fs/nilfs2/file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nilfs2/file.c 2009-12-08 17:39:44.181797872 -0500
+@@ -117,7 +117,7 @@ static int nilfs_page_mkwrite(struct vm_
+ return 0;
+ }
+
+-struct vm_operations_struct nilfs_file_vm_ops = {
++const struct vm_operations_struct nilfs_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = nilfs_page_mkwrite,
+ };
+@@ -134,7 +134,7 @@ static int nilfs_file_mmap(struct file *
+ * We have mostly NULL's here: the current defaults are ok for
+ * the nilfs filesystem.
+ */
+-struct file_operations nilfs_file_operations = {
++const struct file_operations nilfs_file_operations = {
+ .llseek = generic_file_llseek,
+ .read = do_sync_read,
+ .write = do_sync_write,
+@@ -151,7 +151,7 @@ struct file_operations nilfs_file_operat
+ .splice_read = generic_file_splice_read,
+ };
+
+-struct inode_operations nilfs_file_inode_operations = {
++const struct inode_operations nilfs_file_inode_operations = {
+ .truncate = nilfs_truncate,
+ .setattr = nilfs_setattr,
+ .permission = nilfs_permission,
+diff -urNp linux-2.6.31.7/fs/nilfs2/gcinode.c linux-2.6.31.7/fs/nilfs2/gcinode.c
+--- linux-2.6.31.7/fs/nilfs2/gcinode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nilfs2/gcinode.c 2009-12-08 17:39:44.181797872 -0500
+@@ -52,7 +52,7 @@
+ #include "dat.h"
+ #include "ifile.h"
+
+-static struct address_space_operations def_gcinode_aops = {
++static const struct address_space_operations def_gcinode_aops = {
+ .sync_page = block_sync_page,
+ };
+
+diff -urNp linux-2.6.31.7/fs/nilfs2/inode.c linux-2.6.31.7/fs/nilfs2/inode.c
+--- linux-2.6.31.7/fs/nilfs2/inode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nilfs2/inode.c 2009-12-08 17:39:44.189215422 -0500
+@@ -238,7 +238,7 @@ nilfs_direct_IO(int rw, struct kiocb *io
+ return size;
+ }
+
+-struct address_space_operations nilfs_aops = {
++const struct address_space_operations nilfs_aops = {
+ .writepage = nilfs_writepage,
+ .readpage = nilfs_readpage,
+ .sync_page = block_sync_page,
+diff -urNp linux-2.6.31.7/fs/nilfs2/mdt.c linux-2.6.31.7/fs/nilfs2/mdt.c
+--- linux-2.6.31.7/fs/nilfs2/mdt.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nilfs2/mdt.c 2009-12-08 17:39:44.189773061 -0500
+@@ -430,7 +430,7 @@ nilfs_mdt_write_page(struct page *page,
+ }
+
+
+-static struct address_space_operations def_mdt_aops = {
++static const struct address_space_operations def_mdt_aops = {
+ .writepage = nilfs_mdt_write_page,
+ .sync_page = block_sync_page,
+ };
+diff -urNp linux-2.6.31.7/fs/nilfs2/namei.c linux-2.6.31.7/fs/nilfs2/namei.c
+--- linux-2.6.31.7/fs/nilfs2/namei.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nilfs2/namei.c 2009-12-08 17:39:44.189773061 -0500
+@@ -448,7 +448,7 @@ out:
+ return err;
+ }
+
+-struct inode_operations nilfs_dir_inode_operations = {
++const struct inode_operations nilfs_dir_inode_operations = {
+ .create = nilfs_create,
+ .lookup = nilfs_lookup,
+ .link = nilfs_link,
+@@ -462,12 +462,12 @@ struct inode_operations nilfs_dir_inode_
+ .permission = nilfs_permission,
+ };
+
+-struct inode_operations nilfs_special_inode_operations = {
++const struct inode_operations nilfs_special_inode_operations = {
+ .setattr = nilfs_setattr,
+ .permission = nilfs_permission,
+ };
+
+-struct inode_operations nilfs_symlink_inode_operations = {
++const struct inode_operations nilfs_symlink_inode_operations = {
+ .readlink = generic_readlink,
+ .follow_link = page_follow_link_light,
+ .put_link = page_put_link,
+diff -urNp linux-2.6.31.7/fs/nilfs2/nilfs.h linux-2.6.31.7/fs/nilfs2/nilfs.h
+--- linux-2.6.31.7/fs/nilfs2/nilfs.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nilfs2/nilfs.h 2009-12-08 17:39:44.189773061 -0500
+@@ -294,13 +294,13 @@ void nilfs_clear_gcdat_inode(struct the_
+ /*
+ * Inodes and files operations
+ */
+-extern struct file_operations nilfs_dir_operations;
+-extern struct inode_operations nilfs_file_inode_operations;
+-extern struct file_operations nilfs_file_operations;
+-extern struct address_space_operations nilfs_aops;
+-extern struct inode_operations nilfs_dir_inode_operations;
+-extern struct inode_operations nilfs_special_inode_operations;
+-extern struct inode_operations nilfs_symlink_inode_operations;
++extern const struct file_operations nilfs_dir_operations;
++extern const struct inode_operations nilfs_file_inode_operations;
++extern const struct file_operations nilfs_file_operations;
++extern const struct address_space_operations nilfs_aops;
++extern const struct inode_operations nilfs_dir_inode_operations;
++extern const struct inode_operations nilfs_special_inode_operations;
++extern const struct inode_operations nilfs_symlink_inode_operations;
+
+ /*
+ * filesystem type
+diff -urNp linux-2.6.31.7/fs/nilfs2/super.c linux-2.6.31.7/fs/nilfs2/super.c
+--- linux-2.6.31.7/fs/nilfs2/super.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nilfs2/super.c 2009-12-08 17:39:44.191735727 -0500
+@@ -529,7 +529,7 @@ static int nilfs_statfs(struct dentry *d
+ return 0;
+ }
+
+-static struct super_operations nilfs_sops = {
++static const struct super_operations nilfs_sops = {
+ .alloc_inode = nilfs_alloc_inode,
+ .destroy_inode = nilfs_destroy_inode,
+ .dirty_inode = nilfs_dirty_inode,
+diff -urNp linux-2.6.31.7/fs/nls/nls_base.c linux-2.6.31.7/fs/nls/nls_base.c
+--- linux-2.6.31.7/fs/nls/nls_base.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/nls/nls_base.c 2009-12-08 17:39:44.191735727 -0500
+@@ -41,7 +41,7 @@ static const struct utf8_table utf8_tabl
+ {0xF8, 0xF0, 3*6, 0x1FFFFF, 0x10000, /* 4 byte sequence */},
+ {0xFC, 0xF8, 4*6, 0x3FFFFFF, 0x200000, /* 5 byte sequence */},
+ {0xFE, 0xFC, 5*6, 0x7FFFFFFF, 0x4000000, /* 6 byte sequence */},
+- {0, /* end of table */}
++ {0, 0, 0, 0, 0, /* end of table */}
+ };
+
+ #define UNICODE_MAX 0x0010ffff
+diff -urNp linux-2.6.31.7/fs/ntfs/file.c linux-2.6.31.7/fs/ntfs/file.c
+--- linux-2.6.31.7/fs/ntfs/file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ntfs/file.c 2009-12-08 17:39:44.192799571 -0500
+@@ -2291,6 +2291,6 @@ const struct inode_operations ntfs_file_
+ #endif /* NTFS_RW */
+ };
+
+-const struct file_operations ntfs_empty_file_ops = {};
++const struct file_operations ntfs_empty_file_ops __read_only;
+
+-const struct inode_operations ntfs_empty_inode_ops = {};
++const struct inode_operations ntfs_empty_inode_ops __read_only;
+diff -urNp linux-2.6.31.7/fs/ocfs2/cluster/heartbeat.c linux-2.6.31.7/fs/ocfs2/cluster/heartbeat.c
+--- linux-2.6.31.7/fs/ocfs2/cluster/heartbeat.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ocfs2/cluster/heartbeat.c 2009-12-08 17:39:44.192799571 -0500
+@@ -966,7 +966,7 @@ static ssize_t o2hb_debug_read(struct fi
+ }
+ #endif /* CONFIG_DEBUG_FS */
+
+-static struct file_operations o2hb_debug_fops = {
++static const struct file_operations o2hb_debug_fops = {
+ .open = o2hb_debug_open,
+ .release = o2hb_debug_release,
+ .read = o2hb_debug_read,
+diff -urNp linux-2.6.31.7/fs/ocfs2/cluster/masklog.c linux-2.6.31.7/fs/ocfs2/cluster/masklog.c
+--- linux-2.6.31.7/fs/ocfs2/cluster/masklog.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ocfs2/cluster/masklog.c 2009-12-08 17:39:44.192799571 -0500
+@@ -134,7 +134,7 @@ static ssize_t mlog_store(struct kobject
+ return mlog_mask_store(mlog_attr->mask, buf, count);
+ }
+
+-static struct sysfs_ops mlog_attr_ops = {
++static const struct sysfs_ops mlog_attr_ops = {
+ .show = mlog_show,
+ .store = mlog_store,
+ };
+diff -urNp linux-2.6.31.7/fs/ocfs2/cluster/netdebug.c linux-2.6.31.7/fs/ocfs2/cluster/netdebug.c
+--- linux-2.6.31.7/fs/ocfs2/cluster/netdebug.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ocfs2/cluster/netdebug.c 2009-12-08 17:39:44.193657267 -0500
+@@ -163,7 +163,7 @@ static void nst_seq_stop(struct seq_file
+ {
+ }
+
+-static struct seq_operations nst_seq_ops = {
++static const struct seq_operations nst_seq_ops = {
+ .start = nst_seq_start,
+ .next = nst_seq_next,
+ .stop = nst_seq_stop,
+@@ -207,7 +207,7 @@ static int nst_fop_release(struct inode
+ return seq_release_private(inode, file);
+ }
+
+-static struct file_operations nst_seq_fops = {
++static const struct file_operations nst_seq_fops = {
+ .open = nst_fop_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+@@ -344,7 +344,7 @@ static void sc_seq_stop(struct seq_file
+ {
+ }
+
+-static struct seq_operations sc_seq_ops = {
++static const struct seq_operations sc_seq_ops = {
+ .start = sc_seq_start,
+ .next = sc_seq_next,
+ .stop = sc_seq_stop,
+@@ -388,7 +388,7 @@ static int sc_fop_release(struct inode *
+ return seq_release_private(inode, file);
+ }
+
+-static struct file_operations sc_seq_fops = {
++static const struct file_operations sc_seq_fops = {
+ .open = sc_fop_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.31.7/fs/ocfs2/dlm/dlmdebug.c linux-2.6.31.7/fs/ocfs2/dlm/dlmdebug.c
+--- linux-2.6.31.7/fs/ocfs2/dlm/dlmdebug.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ocfs2/dlm/dlmdebug.c 2009-12-08 17:39:44.193657267 -0500
+@@ -479,7 +479,7 @@ bail:
+ return -ENOMEM;
+ }
+
+-static struct file_operations debug_purgelist_fops = {
++static const struct file_operations debug_purgelist_fops = {
+ .open = debug_purgelist_open,
+ .release = debug_buffer_release,
+ .read = debug_buffer_read,
+@@ -539,7 +539,7 @@ bail:
+ return -ENOMEM;
+ }
+
+-static struct file_operations debug_mle_fops = {
++static const struct file_operations debug_mle_fops = {
+ .open = debug_mle_open,
+ .release = debug_buffer_release,
+ .read = debug_buffer_read,
+@@ -683,7 +683,7 @@ static int lockres_seq_show(struct seq_f
+ return 0;
+ }
+
+-static struct seq_operations debug_lockres_ops = {
++static const struct seq_operations debug_lockres_ops = {
+ .start = lockres_seq_start,
+ .stop = lockres_seq_stop,
+ .next = lockres_seq_next,
+@@ -742,7 +742,7 @@ static int debug_lockres_release(struct
+ return seq_release_private(inode, file);
+ }
+
+-static struct file_operations debug_lockres_fops = {
++static const struct file_operations debug_lockres_fops = {
+ .open = debug_lockres_open,
+ .release = debug_lockres_release,
+ .read = seq_read,
+@@ -926,7 +926,7 @@ bail:
+ return -ENOMEM;
+ }
+
+-static struct file_operations debug_state_fops = {
++static const struct file_operations debug_state_fops = {
+ .open = debug_state_open,
+ .release = debug_buffer_release,
+ .read = debug_buffer_read,
+diff -urNp linux-2.6.31.7/fs/ocfs2/localalloc.c linux-2.6.31.7/fs/ocfs2/localalloc.c
+--- linux-2.6.31.7/fs/ocfs2/localalloc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ocfs2/localalloc.c 2009-12-08 17:39:44.194700140 -0500
+@@ -1186,7 +1186,7 @@ static int ocfs2_local_alloc_slide_windo
+ goto bail;
+ }
+
+- atomic_inc(&osb->alloc_stats.moves);
++ atomic_inc_unchecked(&osb->alloc_stats.moves);
+
+ status = 0;
+ bail:
+diff -urNp linux-2.6.31.7/fs/ocfs2/mmap.c linux-2.6.31.7/fs/ocfs2/mmap.c
+--- linux-2.6.31.7/fs/ocfs2/mmap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ocfs2/mmap.c 2009-12-08 17:39:44.194700140 -0500
+@@ -202,7 +202,7 @@ out:
+ return ret;
+ }
+
+-static struct vm_operations_struct ocfs2_file_vm_ops = {
++static const struct vm_operations_struct ocfs2_file_vm_ops = {
+ .fault = ocfs2_fault,
+ .page_mkwrite = ocfs2_page_mkwrite,
+ };
+diff -urNp linux-2.6.31.7/fs/ocfs2/ocfs2.h linux-2.6.31.7/fs/ocfs2/ocfs2.h
+--- linux-2.6.31.7/fs/ocfs2/ocfs2.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ocfs2/ocfs2.h 2009-12-08 17:39:44.194700140 -0500
+@@ -191,11 +191,11 @@ enum ocfs2_vol_state
+
+ struct ocfs2_alloc_stats
+ {
+- atomic_t moves;
+- atomic_t local_data;
+- atomic_t bitmap_data;
+- atomic_t bg_allocs;
+- atomic_t bg_extends;
++ atomic_unchecked_t moves;
++ atomic_unchecked_t local_data;
++ atomic_unchecked_t bitmap_data;
++ atomic_unchecked_t bg_allocs;
++ atomic_unchecked_t bg_extends;
+ };
+
+ enum ocfs2_local_alloc_state
+diff -urNp linux-2.6.31.7/fs/ocfs2/suballoc.c linux-2.6.31.7/fs/ocfs2/suballoc.c
+--- linux-2.6.31.7/fs/ocfs2/suballoc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ocfs2/suballoc.c 2009-12-08 17:39:44.195679943 -0500
+@@ -620,7 +620,7 @@ static int ocfs2_reserve_suballoc_bits(s
+ mlog_errno(status);
+ goto bail;
+ }
+- atomic_inc(&osb->alloc_stats.bg_extends);
++ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
+
+ /* You should never ask for this much metadata */
+ BUG_ON(bits_wanted >
+@@ -1650,7 +1650,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
+ mlog_errno(status);
+ goto bail;
+ }
+- atomic_inc(&osb->alloc_stats.bg_allocs);
++ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
+
+ *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
+ ac->ac_bits_given += (*num_bits);
+@@ -1724,7 +1724,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
+ mlog_errno(status);
+ goto bail;
+ }
+- atomic_inc(&osb->alloc_stats.bg_allocs);
++ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
+
+ BUG_ON(num_bits != 1);
+
+@@ -1826,7 +1826,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
+ cluster_start,
+ num_clusters);
+ if (!status)
+- atomic_inc(&osb->alloc_stats.local_data);
++ atomic_inc_unchecked(&osb->alloc_stats.local_data);
+ } else {
+ if (min_clusters > (osb->bitmap_cpg - 1)) {
+ /* The only paths asking for contiguousness
+@@ -1854,7 +1854,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
+ ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
+ bg_blkno,
+ bg_bit_off);
+- atomic_inc(&osb->alloc_stats.bitmap_data);
++ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
+ }
+ }
+ if (status < 0) {
+diff -urNp linux-2.6.31.7/fs/ocfs2/super.c linux-2.6.31.7/fs/ocfs2/super.c
+--- linux-2.6.31.7/fs/ocfs2/super.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ocfs2/super.c 2009-12-08 17:39:44.201804478 -0500
+@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
+ "%10s => GlobalAllocs: %d LocalAllocs: %d "
+ "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
+ "Stats",
+- atomic_read(&osb->alloc_stats.bitmap_data),
+- atomic_read(&osb->alloc_stats.local_data),
+- atomic_read(&osb->alloc_stats.bg_allocs),
+- atomic_read(&osb->alloc_stats.moves),
+- atomic_read(&osb->alloc_stats.bg_extends));
++ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
++ atomic_read_unchecked(&osb->alloc_stats.local_data),
++ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
++ atomic_read_unchecked(&osb->alloc_stats.moves),
++ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
+
+ out += snprintf(buf + out, len - out,
+ "%10s => State: %u Descriptor: %llu Size: %u bits "
+@@ -373,7 +373,7 @@ static ssize_t ocfs2_debug_read(struct f
+ }
+ #endif /* CONFIG_DEBUG_FS */
+
+-static struct file_operations ocfs2_osb_debug_fops = {
++static const struct file_operations ocfs2_osb_debug_fops = {
+ .open = ocfs2_osb_debug_open,
+ .release = ocfs2_debug_release,
+ .read = ocfs2_debug_read,
+@@ -1991,11 +1991,11 @@ static int ocfs2_initialize_super(struct
+ spin_lock_init(&osb->osb_xattr_lock);
+ ocfs2_init_inode_steal_slot(osb);
+
+- atomic_set(&osb->alloc_stats.moves, 0);
+- atomic_set(&osb->alloc_stats.local_data, 0);
+- atomic_set(&osb->alloc_stats.bitmap_data, 0);
+- atomic_set(&osb->alloc_stats.bg_allocs, 0);
+- atomic_set(&osb->alloc_stats.bg_extends, 0);
++ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
++ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
++ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
++ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
++ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
+
+ /* Copy the blockcheck stats from the superblock probe */
+ osb->osb_ecc_stats = *stats;
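
The ocfs2 hunks above convert the alloc_stats counters from atomic_t to atomic_unchecked_t and switch every access to the *_unchecked helpers. With PaX's reference-counter overflow checking (enabled elsewhere by this patch on the architectures that implement it), plain atomic_t operations refuse to wrap; counters that are purely statistical, like these allocator statistics, may wrap harmlessly, so they are moved to the unchecked variants instead. A minimal sketch of the pattern, assuming the patched asm/atomic.h is in effect:

	/* Sketch only: a statistics counter where wrap-around is harmless,
	 * so the overflow check is deliberately bypassed. */
	struct example_stats {
		atomic_unchecked_t hits;
	};

	static void example_record_hit(struct example_stats *s)
	{
		atomic_inc_unchecked(&s->hits);		/* no overflow trap */
	}

	static int example_hit_count(struct example_stats *s)
	{
		return atomic_read_unchecked(&s->hits);
	}
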
+diff -urNp linux-2.6.31.7/fs/omfs/dir.c linux-2.6.31.7/fs/omfs/dir.c
+--- linux-2.6.31.7/fs/omfs/dir.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/omfs/dir.c 2009-12-08 17:39:44.201804478 -0500
+@@ -489,7 +489,7 @@ out:
+ return ret;
+ }
+
+-struct inode_operations omfs_dir_inops = {
++const struct inode_operations omfs_dir_inops = {
+ .lookup = omfs_lookup,
+ .mkdir = omfs_mkdir,
+ .rename = omfs_rename,
+@@ -498,7 +498,7 @@ struct inode_operations omfs_dir_inops =
+ .rmdir = omfs_rmdir,
+ };
+
+-struct file_operations omfs_dir_operations = {
++const struct file_operations omfs_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = omfs_readdir,
+ .llseek = generic_file_llseek,
+diff -urNp linux-2.6.31.7/fs/omfs/file.c linux-2.6.31.7/fs/omfs/file.c
+--- linux-2.6.31.7/fs/omfs/file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/omfs/file.c 2009-12-08 17:39:44.202799759 -0500
+@@ -322,7 +322,7 @@ static sector_t omfs_bmap(struct address
+ return generic_block_bmap(mapping, block, omfs_get_block);
+ }
+
+-struct file_operations omfs_file_operations = {
++const struct file_operations omfs_file_operations = {
+ .llseek = generic_file_llseek,
+ .read = do_sync_read,
+ .write = do_sync_write,
+@@ -333,11 +333,11 @@ struct file_operations omfs_file_operati
+ .splice_read = generic_file_splice_read,
+ };
+
+-struct inode_operations omfs_file_inops = {
++const struct inode_operations omfs_file_inops = {
+ .truncate = omfs_truncate
+ };
+
+-struct address_space_operations omfs_aops = {
++const struct address_space_operations omfs_aops = {
+ .readpage = omfs_readpage,
+ .readpages = omfs_readpages,
+ .writepage = omfs_writepage,
+diff -urNp linux-2.6.31.7/fs/omfs/inode.c linux-2.6.31.7/fs/omfs/inode.c
+--- linux-2.6.31.7/fs/omfs/inode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/omfs/inode.c 2009-12-08 17:39:44.202799759 -0500
+@@ -278,7 +278,7 @@ static int omfs_statfs(struct dentry *de
+ return 0;
+ }
+
+-static struct super_operations omfs_sops = {
++static const struct super_operations omfs_sops = {
+ .write_inode = omfs_write_inode,
+ .delete_inode = omfs_delete_inode,
+ .put_super = omfs_put_super,
+diff -urNp linux-2.6.31.7/fs/omfs/omfs.h linux-2.6.31.7/fs/omfs/omfs.h
+--- linux-2.6.31.7/fs/omfs/omfs.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/omfs/omfs.h 2009-12-08 17:39:44.202799759 -0500
+@@ -44,16 +44,16 @@ extern int omfs_allocate_range(struct su
+ extern int omfs_clear_range(struct super_block *sb, u64 block, int count);
+
+ /* dir.c */
+-extern struct file_operations omfs_dir_operations;
+-extern struct inode_operations omfs_dir_inops;
++extern const struct file_operations omfs_dir_operations;
++extern const struct inode_operations omfs_dir_inops;
+ extern int omfs_make_empty(struct inode *inode, struct super_block *sb);
+ extern int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header,
+ u64 fsblock);
+
+ /* file.c */
+-extern struct file_operations omfs_file_operations;
+-extern struct inode_operations omfs_file_inops;
+-extern struct address_space_operations omfs_aops;
++extern const struct file_operations omfs_file_operations;
++extern const struct inode_operations omfs_file_inops;
++extern const struct address_space_operations omfs_aops;
+ extern void omfs_make_empty_table(struct buffer_head *bh, int offset);
+ extern int omfs_shrink_inode(struct inode *inode);
+
+diff -urNp linux-2.6.31.7/fs/open.c linux-2.6.31.7/fs/open.c
+--- linux-2.6.31.7/fs/open.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/open.c 2009-12-08 17:39:44.203714889 -0500
+@@ -206,6 +206,9 @@ int do_truncate(struct dentry *dentry, l
+ if (length < 0)
+ return -EINVAL;
+
++ if (filp && !gr_acl_handle_truncate(dentry, filp->f_path.mnt))
++ return -EACCES;
++
+ newattrs.ia_size = length;
+ newattrs.ia_valid = ATTR_SIZE | time_attrs;
+ if (filp) {
+@@ -510,6 +513,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
+ if (__mnt_is_readonly(path.mnt))
+ res = -EROFS;
+
++ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
++ res = -EACCES;
++
+ out_path_release:
+ path_put(&path);
+ out:
+@@ -536,6 +542,8 @@ SYSCALL_DEFINE1(chdir, const char __user
+ if (error)
+ goto dput_and_out;
+
++ gr_log_chdir(path.dentry, path.mnt);
++
+ set_fs_pwd(current->fs, &path);
+
+ dput_and_out:
+@@ -562,6 +570,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
+ goto out_putf;
+
+ error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
++
++ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
++ error = -EPERM;
++
++ if (!error)
++ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
++
+ if (!error)
+ set_fs_pwd(current->fs, &file->f_path);
+ out_putf:
+@@ -587,7 +602,18 @@ SYSCALL_DEFINE1(chroot, const char __use
+ if (!capable(CAP_SYS_CHROOT))
+ goto dput_and_out;
+
++ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
++ goto dput_and_out;
++
++ if (gr_handle_chroot_caps(&path)) {
++ error = -ENOMEM;
++ goto dput_and_out;
++ }
++
+ set_fs_root(current->fs, &path);
++
++ gr_handle_chroot_chdir(&path);
++
+ error = 0;
+ dput_and_out:
+ path_put(&path);
+@@ -615,13 +641,28 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
+ err = mnt_want_write_file(file);
+ if (err)
+ goto out_putf;
++
++ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
++ err = -EACCES;
++ goto out_drop_write;
++ }
++
+ mutex_lock(&inode->i_mutex);
+ if (mode == (mode_t) -1)
+ mode = inode->i_mode;
++
++ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
++ err = -EPERM;
++ mutex_unlock(&inode->i_mutex);
++ goto out_drop_write;
++ }
++
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+ err = notify_change(dentry, &newattrs);
+ mutex_unlock(&inode->i_mutex);
++
++out_drop_write:
+ mnt_drop_write(file->f_path.mnt);
+ out_putf:
+ fput(file);
+@@ -644,13 +685,28 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto dput_and_out;
++
++ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
++ error = -EACCES;
++ goto out_drop_write;
++ }
++
+ mutex_lock(&inode->i_mutex);
+ if (mode == (mode_t) -1)
+ mode = inode->i_mode;
++
++ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
++ error = -EACCES;
++ mutex_unlock(&inode->i_mutex);
++ goto out_drop_write;
++ }
++
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+ error = notify_change(path.dentry, &newattrs);
+ mutex_unlock(&inode->i_mutex);
++
++out_drop_write:
+ mnt_drop_write(path.mnt);
+ dput_and_out:
+ path_put(&path);
+@@ -663,12 +719,15 @@ SYSCALL_DEFINE2(chmod, const char __user
+ return sys_fchmodat(AT_FDCWD, filename, mode);
+ }
+
+-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
++static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
+ {
+ struct inode *inode = dentry->d_inode;
+ int error;
+ struct iattr newattrs;
+
++ if (!gr_acl_handle_chown(dentry, mnt))
++ return -EACCES;
++
+ newattrs.ia_valid = ATTR_CTIME;
+ if (user != (uid_t) -1) {
+ newattrs.ia_valid |= ATTR_UID;
+@@ -699,7 +758,7 @@ SYSCALL_DEFINE3(chown, const char __user
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_release;
+- error = chown_common(path.dentry, user, group);
++ error = chown_common(path.dentry, user, group, path.mnt);
+ mnt_drop_write(path.mnt);
+ out_release:
+ path_put(&path);
+@@ -724,7 +783,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_release;
+- error = chown_common(path.dentry, user, group);
++ error = chown_common(path.dentry, user, group, path.mnt);
+ mnt_drop_write(path.mnt);
+ out_release:
+ path_put(&path);
+@@ -743,7 +802,7 @@ SYSCALL_DEFINE3(lchown, const char __use
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_release;
+- error = chown_common(path.dentry, user, group);
++ error = chown_common(path.dentry, user, group, path.mnt);
+ mnt_drop_write(path.mnt);
+ out_release:
+ path_put(&path);
+@@ -766,7 +825,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
+ goto out_fput;
+ dentry = file->f_path.dentry;
+ audit_inode(NULL, dentry);
+- error = chown_common(dentry, user, group);
++ error = chown_common(dentry, user, group, file->f_path.mnt);
+ mnt_drop_write(file->f_path.mnt);
+ out_fput:
+ fput(file);
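
The fs/open.c hunk above inserts grsecurity policy checks ahead of truncate, faccessat, chdir/fchdir, chroot, chmod/fchmod and the chown family, and widens chown_common() with a vfsmount argument so gr_acl_handle_chown() can see which mount the dentry was reached through. Every gate has the same shape: ask the policy hook first and bail out with -EACCES or -EPERM before any locks are taken or attributes are changed. A minimal sketch of that shape, reusing two hooks from the hunk (not a drop-in replacement for the real call sites):

	/* Sketch of the gating pattern used throughout this hunk: the
	 * hooks see both the dentry and the vfsmount and can veto the
	 * operation before i_mutex is taken. */
	static int example_chmod_gate(struct dentry *dentry,
				      struct vfsmount *mnt, mode_t mode)
	{
		if (!gr_acl_handle_chmod(dentry, mnt, mode))
			return -EACCES;		/* RBAC policy veto */
		if (gr_handle_chroot_chmod(dentry, mnt, mode))
			return -EPERM;		/* chroot restriction */
		return 0;			/* proceed as usual */
	}
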
+diff -urNp linux-2.6.31.7/fs/pipe.c linux-2.6.31.7/fs/pipe.c
+--- linux-2.6.31.7/fs/pipe.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/pipe.c 2009-12-08 17:39:44.203714889 -0500
+@@ -401,9 +401,9 @@ redo:
+ }
+ if (bufs) /* More to do? */
+ continue;
+- if (!pipe->writers)
++ if (!atomic_read(&pipe->writers))
+ break;
+- if (!pipe->waiting_writers) {
++ if (!atomic_read(&pipe->waiting_writers)) {
+ /* syscall merging: Usually we must not sleep
+ * if O_NONBLOCK is set, or if we got some data.
+ * But if a writer sleeps in kernel space, then
+@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
+ mutex_lock(&inode->i_mutex);
+ pipe = inode->i_pipe;
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ ret = -EPIPE;
+ goto out;
+@@ -511,7 +511,7 @@ redo1:
+ for (;;) {
+ int bufs;
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -597,9 +597,9 @@ redo2:
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ do_wakeup = 0;
+ }
+- pipe->waiting_writers++;
++ atomic_inc(&pipe->waiting_writers);
+ pipe_wait(pipe);
+- pipe->waiting_writers--;
++ atomic_dec(&pipe->waiting_writers);
+ }
+ out:
+ mutex_unlock(&inode->i_mutex);
+@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
+ mask = 0;
+ if (filp->f_mode & FMODE_READ) {
+ mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
+- if (!pipe->writers && filp->f_version != pipe->w_counter)
++ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
+ mask |= POLLHUP;
+ }
+
+@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
+ * Most Unices do not set POLLERR for FIFOs but on Linux they
+ * behave exactly like pipes for poll().
+ */
+- if (!pipe->readers)
++ if (!atomic_read(&pipe->readers))
+ mask |= POLLERR;
+ }
+
+@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
+
+ mutex_lock(&inode->i_mutex);
+ pipe = inode->i_pipe;
+- pipe->readers -= decr;
+- pipe->writers -= decw;
++ atomic_sub(decr, &pipe->readers);
++ atomic_sub(decw, &pipe->writers);
+
+- if (!pipe->readers && !pipe->writers) {
++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
+ free_pipe_info(inode);
+ } else {
+ wake_up_interruptible_sync(&pipe->wait);
+@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
+
+ if (inode->i_pipe) {
+ ret = 0;
+- inode->i_pipe->readers++;
++ atomic_inc(&inode->i_pipe->readers);
+ }
+
+ mutex_unlock(&inode->i_mutex);
+@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
+
+ if (inode->i_pipe) {
+ ret = 0;
+- inode->i_pipe->writers++;
++ atomic_inc(&inode->i_pipe->writers);
+ }
+
+ mutex_unlock(&inode->i_mutex);
+@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
+ if (inode->i_pipe) {
+ ret = 0;
+ if (filp->f_mode & FMODE_READ)
+- inode->i_pipe->readers++;
++ atomic_inc(&inode->i_pipe->readers);
+ if (filp->f_mode & FMODE_WRITE)
+- inode->i_pipe->writers++;
++ atomic_inc(&inode->i_pipe->writers);
+ }
+
+ mutex_unlock(&inode->i_mutex);
+@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
+ inode->i_pipe = NULL;
+ }
+
+-static struct vfsmount *pipe_mnt __read_mostly;
++struct vfsmount *pipe_mnt __read_mostly;
+ static int pipefs_delete_dentry(struct dentry *dentry)
+ {
+ /*
+@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
+ goto fail_iput;
+ inode->i_pipe = pipe;
+
+- pipe->readers = pipe->writers = 1;
++ atomic_set(&pipe->readers, 1);
++ atomic_set(&pipe->writers, 1);
+ inode->i_fop = &rdwr_pipefifo_fops;
+
+ /*
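
The fs/pipe.c hunk above (together with the matching fs/splice.c hunk further down) turns pipe->readers, pipe->writers and pipe->waiting_writers into atomic_t and replaces the bare increments, decrements and tests with atomic_inc/atomic_dec/atomic_sub/atomic_read; the struct pipe_inode_info field change itself is made elsewhere in this patch. The apparent intent is to bring these lifetime counters under the same overflow checking as other reference counts, so a wrapped reader or writer count cannot cause the pipe to be torn down while still in use. A minimal sketch of the open/release pairing, assuming the fields are already atomic_t:

	/* Sketch only: take and drop a reader reference the way the patched
	 * pipe_read_open()/pipe_release() do. */
	static void example_get_reader(struct pipe_inode_info *pipe)
	{
		atomic_inc(&pipe->readers);
	}

	static int example_put_reader(struct pipe_inode_info *pipe)
	{
		atomic_dec(&pipe->readers);
		/* last reader gone and no writers left: caller should free */
		return !atomic_read(&pipe->readers) &&
		       !atomic_read(&pipe->writers);
	}
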
+diff -urNp linux-2.6.31.7/fs/proc/array.c linux-2.6.31.7/fs/proc/array.c
+--- linux-2.6.31.7/fs/proc/array.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/array.c 2009-12-08 17:39:44.204691455 -0500
+@@ -321,6 +321,21 @@ static inline void task_context_switch_c
+ p->nivcsw);
+ }
+
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline void task_pax(struct seq_file *m, struct task_struct *p)
++{
++ if (p->mm)
++ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
++ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
++ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
++ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
++ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
++ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
++ else
++ seq_printf(m, "PaX:\t-----\n");
++}
++#endif
++
+ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+ {
+@@ -340,9 +355,20 @@ int proc_pid_status(struct seq_file *m,
+ task_show_regs(m, task);
+ #endif
+ task_context_switch_counts(m, task);
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ task_pax(m, task);
++#endif
++
+ return 0;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++ (_mm->pax_flags & MF_PAX_RANDMMAP || \
++ _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task, int whole)
+ {
+@@ -439,6 +465,19 @@ static int do_task_stat(struct seq_file
+ gtime = task_gtime(task);
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (PAX_RAND_FLAGS(mm)) {
++ eip = 0;
++ esp = 0;
++ wchan = 0;
++ }
++#endif
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ wchan = 0;
++ eip =0;
++ esp =0;
++#endif
++
+ /* scale priority and nice values from timeslices to -20..20 */
+ /* to make it look like a "normal" Unix priority/nice value */
+ priority = task_prio(task);
+@@ -479,9 +518,15 @@ static int do_task_stat(struct seq_file
+ vsize,
+ mm ? get_mm_rss(mm) : 0,
+ rsslim,
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? mm->start_code : 0),
++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? mm->end_code : 0),
++ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
++#else
+ mm ? mm->start_code : 0,
+ mm ? mm->end_code : 0,
+ (permitted && mm) ? mm->start_stack : 0,
++#endif
+ esp,
+ eip,
+ /* The signal information here is obsolete.
+@@ -534,3 +579,10 @@ int proc_pid_statm(struct seq_file *m, s
+
+ return 0;
+ }
++
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++int proc_pid_ipaddr(struct task_struct *task, char *buffer)
++{
++ return sprintf(buffer, "%u.%u.%u.%u\n", NIPQUAD(task->signal->curr_ip));
++}
++#endif
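
The fs/proc/array.c hunk above adds a "PaX:" line to /proc/<pid>/status with five flag letters (PAGEEXEC, EMUTRAMP, MPROTECT, RANDMMAP, SEGMEXEC), upper-case when the flag is set in the task's mm and lower-case when it is not, or five dashes for tasks without an mm; it also blanks eip, esp and wchan in /proc/<pid>/stat when GRKERNSEC_PROC_MEMMAP or GRKERNSEC_HIDESYM wants them hidden, and adds the proc_pid_ipaddr() helper used by the GRKERNSEC_PROC_IPADDR entry wired up in fs/proc/base.c below. A small, purely illustrative userspace reader for the new status line:

	/* Hypothetical helper: print the PaX flag line this hunk adds to
	 * /proc/<pid>/status, e.g. "PaX:\tPeMRs". */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/self/status", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (strncmp(line, "PaX:", 4) == 0)
				fputs(line, stdout);
		fclose(f);
		return 0;
	}
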
+diff -urNp linux-2.6.31.7/fs/proc/base.c linux-2.6.31.7/fs/proc/base.c
+--- linux-2.6.31.7/fs/proc/base.c 2009-12-08 17:29:51.634754328 -0500
++++ linux-2.6.31.7/fs/proc/base.c 2009-12-08 17:39:44.205647819 -0500
+@@ -213,6 +213,9 @@ static int check_mem_permission(struct t
+ if (task == current)
+ return 0;
+
++ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
++ return -EPERM;
++
+ /*
+ * If current is actively ptrace'ing, and would also be
+ * permitted to freshly attach with ptrace now, permit it.
+@@ -260,6 +263,9 @@ static int proc_pid_cmdline(struct task_
+ if (!mm->arg_end)
+ goto out_mm; /* Shh! No looking before we're done */
+
++ if (gr_acl_handle_procpidmem(task))
++ goto out_mm;
++
+ len = mm->arg_end - mm->arg_start;
+
+ if (len > PAGE_SIZE)
+@@ -287,12 +293,26 @@ out:
+ return res;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++ (_mm->pax_flags & MF_PAX_RANDMMAP || \
++ _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ static int proc_pid_auxv(struct task_struct *task, char *buffer)
+ {
+ int res = 0;
+ struct mm_struct *mm = get_task_mm(task);
+ if (mm) {
+ unsigned int nwords = 0;
++
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (PAX_RAND_FLAGS(mm)) {
++ mmput(mm);
++ return res;
++ }
++#endif
++
+ do {
+ nwords += 2;
+ } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
+@@ -328,7 +348,7 @@ static int proc_pid_wchan(struct task_st
+ }
+ #endif /* CONFIG_KALLSYMS */
+
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+
+ #define MAX_STACK_TRACE_DEPTH 64
+
+@@ -521,7 +541,7 @@ static int proc_pid_limits(struct task_s
+ return count;
+ }
+
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+ static int proc_pid_syscall(struct task_struct *task, char *buffer)
+ {
+ long nr;
+@@ -935,6 +955,9 @@ static ssize_t environ_read(struct file
+ if (!task)
+ goto out_no_task;
+
++ if (gr_acl_handle_procpidmem(task))
++ goto out;
++
+ if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ goto out;
+
+@@ -1438,7 +1461,11 @@ static struct inode *proc_pid_make_inode
+ rcu_read_lock();
+ cred = __task_cred(task);
+ inode->i_uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++#else
+ inode->i_gid = cred->egid;
++#endif
+ rcu_read_unlock();
+ }
+ security_task_to_inode(task, inode);
+@@ -1456,6 +1483,9 @@ static int pid_getattr(struct vfsmount *
+ struct inode *inode = dentry->d_inode;
+ struct task_struct *task;
+ const struct cred *cred;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ const struct cred *tmpcred = current_cred();
++#endif
+
+ generic_fillattr(inode, stat);
+
+@@ -1463,12 +1493,34 @@ static int pid_getattr(struct vfsmount *
+ stat->uid = 0;
+ stat->gid = 0;
+ task = pid_task(proc_pid(inode), PIDTYPE_PID);
++
++ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
++ rcu_read_unlock();
++ return -ENOENT;
++ }
++
+ if (task) {
++ cred = __task_cred(task);
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
++#endif
++ )
++#endif
+ if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
++#endif
+ task_dumpable(task)) {
+- cred = __task_cred(task);
+ stat->uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
++#else
+ stat->gid = cred->egid;
++#endif
+ }
+ }
+ rcu_read_unlock();
+@@ -1500,11 +1552,20 @@ static int pid_revalidate(struct dentry
+
+ if (task) {
+ if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
++#endif
+ task_dumpable(task)) {
+ rcu_read_lock();
+ cred = __task_cred(task);
+ inode->i_uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++#else
+ inode->i_gid = cred->egid;
++#endif
+ rcu_read_unlock();
+ } else {
+ inode->i_uid = 0;
+@@ -1625,7 +1686,8 @@ static int proc_fd_info(struct inode *in
+ int fd = proc_fd(inode);
+
+ if (task) {
+- files = get_files_struct(task);
++ if (!gr_acl_handle_procpidmem(task))
++ files = get_files_struct(task);
+ put_task_struct(task);
+ }
+ if (files) {
+@@ -1877,12 +1939,22 @@ static const struct file_operations proc
+ static int proc_fd_permission(struct inode *inode, int mask)
+ {
+ int rv;
++ struct task_struct *task;
+
+ rv = generic_permission(inode, mask, NULL);
+- if (rv == 0)
+- return 0;
++
+ if (task_pid(current) == proc_pid(inode))
+ rv = 0;
++
++ task = get_proc_task(inode);
++ if (task == NULL)
++ return rv;
++
++ if (gr_acl_handle_procpidmem(task))
++ rv = -EACCES;
++
++ put_task_struct(task);
++
+ return rv;
+ }
+
+@@ -1991,6 +2063,9 @@ static struct dentry *proc_pident_lookup
+ if (!task)
+ goto out_no_task;
+
++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++ goto out;
++
+ /*
+ * Yes, it does not scale. And it should not. Don't add
+ * new entries into /proc/<tgid>/ without very good reasons.
+@@ -2035,6 +2110,9 @@ static int proc_pident_readdir(struct fi
+ if (!task)
+ goto out_no_task;
+
++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++ goto out;
++
+ ret = 0;
+ i = filp->f_pos;
+ switch (i) {
+@@ -2401,6 +2479,9 @@ static struct dentry *proc_base_lookup(s
+ if (p > last)
+ goto out;
+
++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++ goto out;
++
+ error = proc_base_instantiate(dir, dentry, task, p);
+
+ out:
+@@ -2487,7 +2568,7 @@ static const struct pid_entry tgid_base_
+ #ifdef CONFIG_SCHED_DEBUG
+ REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
+ #endif
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+ INF("syscall", S_IRUSR, proc_pid_syscall),
+ #endif
+ INF("cmdline", S_IRUGO, proc_pid_cmdline),
+@@ -2515,7 +2596,7 @@ static const struct pid_entry tgid_base_
+ #ifdef CONFIG_KALLSYMS
+ INF("wchan", S_IRUGO, proc_pid_wchan),
+ #endif
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ ONE("stack", S_IRUSR, proc_pid_stack),
+ #endif
+ #ifdef CONFIG_SCHEDSTATS
+@@ -2545,6 +2626,9 @@ static const struct pid_entry tgid_base_
+ #ifdef CONFIG_TASK_IO_ACCOUNTING
+ INF("io", S_IRUGO, proc_tgid_io_accounting),
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
++#endif
+ };
+
+ static int proc_tgid_base_readdir(struct file * filp,
+@@ -2673,7 +2757,14 @@ static struct dentry *proc_pid_instantia
+ if (!inode)
+ goto out;
+
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
++#else
+ inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
++#endif
+ inode->i_op = &proc_tgid_base_inode_operations;
+ inode->i_fop = &proc_tgid_base_operations;
+ inode->i_flags|=S_IMMUTABLE;
+@@ -2715,7 +2806,11 @@ struct dentry *proc_pid_lookup(struct in
+ if (!task)
+ goto out;
+
++ if (gr_check_hidden_task(task))
++ goto out_put_task;
++
+ result = proc_pid_instantiate(dir, dentry, task, NULL);
++out_put_task:
+ put_task_struct(task);
+ out:
+ return result;
+@@ -2780,6 +2875,10 @@ int proc_pid_readdir(struct file * filp,
+ {
+ unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
+ struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ const struct cred *tmpcred = current_cred();
++ const struct cred *itercred;
++#endif
+ struct tgid_iter iter;
+ struct pid_namespace *ns;
+
+@@ -2798,6 +2897,20 @@ int proc_pid_readdir(struct file * filp,
+ for (iter = next_tgid(ns, iter);
+ iter.task;
+ iter.tgid += 1, iter = next_tgid(ns, iter)) {
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ itercred = __task_cred(iter.task);
++#endif
++ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
++#endif
++ )
++#endif
++ )
++ continue;
++
+ filp->f_pos = iter.tgid + TGID_OFFSET;
+ if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
+ put_task_struct(iter.task);
+@@ -2825,7 +2938,7 @@ static const struct pid_entry tid_base_s
+ #ifdef CONFIG_SCHED_DEBUG
+ REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
+ #endif
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+ INF("syscall", S_IRUSR, proc_pid_syscall),
+ #endif
+ INF("cmdline", S_IRUGO, proc_pid_cmdline),
+@@ -2852,7 +2965,7 @@ static const struct pid_entry tid_base_s
+ #ifdef CONFIG_KALLSYMS
+ INF("wchan", S_IRUGO, proc_pid_wchan),
+ #endif
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ ONE("stack", S_IRUSR, proc_pid_stack),
+ #endif
+ #ifdef CONFIG_SCHEDSTATS
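
The fs/proc/base.c hunk above restricts who can see other processes under /proc: lookups, readdir and getattr skip tasks that grsecurity hides or that live in another chroot, the per-pid directory mode drops to 0500 (GRKERNSEC_PROC_USER) or 0550 with the group forced to CONFIG_GRKERNSEC_PROC_GID (GRKERNSEC_PROC_USERGROUP), the syscall and stack entries are compiled out when GRKERNSEC_PROC_MEMMAP or GRKERNSEC_HIDESYM would leak through them, and /proc/<pid>/{mem,environ,cmdline,fd} gain gr_acl_handle_procpidmem() checks. The visibility test in proc_pid_readdir() reduces to a small predicate; a sketch under the same config symbols:

	/* Sketch of the viewer check applied in proc_pid_readdir() above:
	 * root, the task's owner and (optionally) members of the configured
	 * proc group may see another task's /proc entry. */
	static int example_may_see_task(const struct cred *viewer,
					const struct cred *target)
	{
		if (viewer->uid == 0 || viewer->uid == target->uid)
			return 1;
	#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
		if (in_group_p(CONFIG_GRKERNSEC_PROC_GID))
			return 1;
	#endif
		return 0;
	}
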
+diff -urNp linux-2.6.31.7/fs/proc/cmdline.c linux-2.6.31.7/fs/proc/cmdline.c
+--- linux-2.6.31.7/fs/proc/cmdline.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/cmdline.c 2009-12-08 17:39:44.205647819 -0500
+@@ -23,7 +23,11 @@ static const struct file_operations cmdl
+
+ static int __init proc_cmdline_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
++#else
+ proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
++#endif
+ return 0;
+ }
+ module_init(proc_cmdline_init);
+diff -urNp linux-2.6.31.7/fs/proc/devices.c linux-2.6.31.7/fs/proc/devices.c
+--- linux-2.6.31.7/fs/proc/devices.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/devices.c 2009-12-08 17:39:44.206571096 -0500
+@@ -64,7 +64,11 @@ static const struct file_operations proc
+
+ static int __init proc_devices_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
++#else
+ proc_create("devices", 0, NULL, &proc_devinfo_operations);
++#endif
+ return 0;
+ }
+ module_init(proc_devices_init);
+diff -urNp linux-2.6.31.7/fs/proc/inode.c linux-2.6.31.7/fs/proc/inode.c
+--- linux-2.6.31.7/fs/proc/inode.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/inode.c 2009-12-08 17:39:44.206571096 -0500
+@@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
+ if (de->mode) {
+ inode->i_mode = de->mode;
+ inode->i_uid = de->uid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++#else
+ inode->i_gid = de->gid;
++#endif
+ }
+ if (de->size)
+ inode->i_size = de->size;
+diff -urNp linux-2.6.31.7/fs/proc/internal.h linux-2.6.31.7/fs/proc/internal.h
+--- linux-2.6.31.7/fs/proc/internal.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/internal.h 2009-12-08 17:39:44.206571096 -0500
+@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
+ struct pid *pid, struct task_struct *task);
+ extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task);
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
++#endif
+ extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
+
+ extern const struct file_operations proc_maps_operations;
+diff -urNp linux-2.6.31.7/fs/proc/Kconfig linux-2.6.31.7/fs/proc/Kconfig
+--- linux-2.6.31.7/fs/proc/Kconfig 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/Kconfig 2009-12-08 17:39:44.204691455 -0500
+@@ -30,12 +30,12 @@ config PROC_FS
+
+ config PROC_KCORE
+ bool "/proc/kcore support" if !ARM
+- depends on PROC_FS && MMU
++ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
+
+ config PROC_VMCORE
+ bool "/proc/vmcore support (EXPERIMENTAL)"
+- depends on PROC_FS && CRASH_DUMP
+- default y
++ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
++ default n
+ help
+ Exports the dump image of crashed kernel in ELF format.
+
+@@ -59,8 +59,8 @@ config PROC_SYSCTL
+ limited in memory.
+
+ config PROC_PAGE_MONITOR
+- default y
+- depends on PROC_FS && MMU
++ default n
++ depends on PROC_FS && MMU && !GRKERNSEC
+ bool "Enable /proc page monitoring" if EMBEDDED
+ help
+ Various /proc files exist to monitor process memory utilization:
+diff -urNp linux-2.6.31.7/fs/proc/kcore.c linux-2.6.31.7/fs/proc/kcore.c
+--- linux-2.6.31.7/fs/proc/kcore.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/kcore.c 2009-12-08 17:39:44.207555991 -0500
+@@ -314,16 +314,16 @@ read_kcore(struct file *file, char __use
+ tsz = buflen;
+
+ while (buflen) {
+- struct kcore_list *m;
++ struct kcore_list *kc;
+
+ read_lock(&kclist_lock);
+- for (m=kclist; m; m=m->next) {
+- if (start >= m->addr && start < (m->addr+m->size))
++ for (kc=kclist; kc; kc=kc->next) {
++ if (start >= kc->addr && start < (kc->addr+kc->size))
+ break;
+ }
+ read_unlock(&kclist_lock);
+
+- if (m == NULL) {
++ if (kc == NULL) {
+ if (clear_user(buffer, tsz))
+ return -EFAULT;
+ } else if (is_vmalloc_addr((void *)start)) {
+@@ -368,7 +368,7 @@ read_kcore(struct file *file, char __use
+ */
+ vmsize = __copy_from_user_inatomic(
+ elf_buf + (vmstart - start),
+- (char *)vmstart, vmsize);
++ (char __user *)vmstart, vmsize);
+ }
+ read_unlock(&vmlist_lock);
+ if (copy_to_user(buffer, elf_buf, tsz)) {
+@@ -410,10 +410,12 @@ read_kcore(struct file *file, char __use
+
+ static int __init proc_kcore_init(void)
+ {
++#if !defined(CONFIG_GRKERNSEC_PROC_ADD) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &proc_kcore_operations);
+ if (proc_root_kcore)
+ proc_root_kcore->size =
+ (size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
++#endif
+ return 0;
+ }
+ module_init(proc_kcore_init);
+diff -urNp linux-2.6.31.7/fs/proc/nommu.c linux-2.6.31.7/fs/proc/nommu.c
+--- linux-2.6.31.7/fs/proc/nommu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/nommu.c 2009-12-08 17:39:44.207555991 -0500
+@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
+ if (len < 1)
+ len = 1;
+ seq_printf(m, "%*c", len, ' ');
+- seq_path(m, &file->f_path, "");
++ seq_path(m, &file->f_path, "\n\\");
+ }
+
+ seq_putc(m, '\n');
+@@ -109,7 +109,7 @@ static void *nommu_region_list_next(stru
+ return rb_next((struct rb_node *) v);
+ }
+
+-static struct seq_operations proc_nommu_region_list_seqop = {
++static const struct seq_operations proc_nommu_region_list_seqop = {
+ .start = nommu_region_list_start,
+ .next = nommu_region_list_next,
+ .stop = nommu_region_list_stop,
+diff -urNp linux-2.6.31.7/fs/proc/proc_net.c linux-2.6.31.7/fs/proc/proc_net.c
+--- linux-2.6.31.7/fs/proc/proc_net.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/proc_net.c 2009-12-08 17:39:44.207555991 -0500
+@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
+ struct task_struct *task;
+ struct nsproxy *ns;
+ struct net *net = NULL;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ const struct cred *cred = current_cred();
++#endif
++
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ if (cred->fsuid)
++ return net;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
++ return net;
++#endif
+
+ rcu_read_lock();
+ task = pid_task(proc_pid(dir), PIDTYPE_PID);
+diff -urNp linux-2.6.31.7/fs/proc/proc_sysctl.c linux-2.6.31.7/fs/proc/proc_sysctl.c
+--- linux-2.6.31.7/fs/proc/proc_sysctl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/proc_sysctl.c 2009-12-08 17:39:44.208690287 -0500
+@@ -7,6 +7,8 @@
+ #include <linux/security.h>
+ #include "internal.h"
+
++extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
++
+ static const struct dentry_operations proc_sys_dentry_operations;
+ static const struct file_operations proc_sys_file_operations;
+ static const struct inode_operations proc_sys_inode_operations;
+@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
+ if (!p)
+ goto out;
+
++ if (gr_handle_sysctl(p, MAY_EXEC))
++ goto out;
++
+ err = ERR_PTR(-ENOMEM);
+ inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
+ if (h)
+@@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
+ if (*pos < file->f_pos)
+ continue;
+
++ if (gr_handle_sysctl(table, 0))
++ continue;
++
+ res = proc_sys_fill_cache(file, dirent, filldir, head, table);
+ if (res)
+ return res;
+@@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
+ if (IS_ERR(head))
+ return PTR_ERR(head);
+
++ if (table && gr_handle_sysctl(table, MAY_EXEC))
++ return -ENOENT;
++
+ generic_fillattr(inode, stat);
+ if (table)
+ stat->mode = (stat->mode & S_IFMT) | table->mode;
+diff -urNp linux-2.6.31.7/fs/proc/root.c linux-2.6.31.7/fs/proc/root.c
+--- linux-2.6.31.7/fs/proc/root.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/root.c 2009-12-08 17:39:44.208690287 -0500
+@@ -134,7 +134,15 @@ void __init proc_root_init(void)
+ #ifdef CONFIG_PROC_DEVICETREE
+ proc_device_tree_init();
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
++#endif
++#else
+ proc_mkdir("bus", NULL);
++#endif
+ proc_sys_init();
+ }
+
+diff -urNp linux-2.6.31.7/fs/proc/task_mmu.c linux-2.6.31.7/fs/proc/task_mmu.c
+--- linux-2.6.31.7/fs/proc/task_mmu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/task_mmu.c 2009-12-08 17:39:44.208690287 -0500
+@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
+ "VmStk:\t%8lu kB\n"
+ "VmExe:\t%8lu kB\n"
+ "VmLib:\t%8lu kB\n"
+- "VmPTE:\t%8lu kB\n",
+- hiwater_vm << (PAGE_SHIFT-10),
++ "VmPTE:\t%8lu kB\n"
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
++#endif
++
++ ,hiwater_vm << (PAGE_SHIFT-10),
+ (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
+ mm->locked_vm << (PAGE_SHIFT-10),
+ hiwater_rss << (PAGE_SHIFT-10),
+ total_rss << (PAGE_SHIFT-10),
+ data << (PAGE_SHIFT-10),
+ mm->stack_vm << (PAGE_SHIFT-10), text, lib,
+- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
++ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ , mm->context.user_cs_base, mm->context.user_cs_limit
++#endif
++
++ );
+ }
+
+ unsigned long task_vsize(struct mm_struct *mm)
+@@ -199,6 +210,12 @@ static int do_maps_open(struct inode *in
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++ (_mm->pax_flags & MF_PAX_RANDMMAP || \
++ _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
+ {
+ struct mm_struct *mm = vma->vm_mm;
+@@ -217,13 +234,22 @@ static void show_map_vma(struct seq_file
+ }
+
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
++ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
++#else
+ vma->vm_start,
+ vma->vm_end,
++#endif
+ flags & VM_READ ? 'r' : '-',
+ flags & VM_WRITE ? 'w' : '-',
+ flags & VM_EXEC ? 'x' : '-',
+ flags & VM_MAYSHARE ? 's' : 'p',
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
++#else
+ pgoff,
++#endif
+ MAJOR(dev), MINOR(dev), ino, &len);
+
+ /*
+@@ -232,16 +258,16 @@ static void show_map_vma(struct seq_file
+ */
+ if (file) {
+ pad_len_spaces(m, len);
+- seq_path(m, &file->f_path, "\n");
++ seq_path(m, &file->f_path, "\n\\");
+ } else {
+ const char *name = arch_vma_name(vma);
+ if (!name) {
+ if (mm) {
+- if (vma->vm_start <= mm->start_brk &&
+- vma->vm_end >= mm->brk) {
++ if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+ name = "[heap]";
+- } else if (vma->vm_start <= mm->start_stack &&
+- vma->vm_end >= mm->start_stack) {
++ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
++ (vma->vm_start <= mm->start_stack &&
++ vma->vm_end >= mm->start_stack)) {
+ name = "[stack]";
+ }
+ } else {
+@@ -384,9 +410,16 @@ static int show_smap(struct seq_file *m,
+ };
+
+ memset(&mss, 0, sizeof mss);
+- mss.vma = vma;
+- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
+- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
++
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
++#endif
++ mss.vma = vma;
++ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
++ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ }
++#endif
+
+ show_map_vma(m, vma);
+
+@@ -402,7 +435,11 @@ static int show_smap(struct seq_file *m,
+ "Swap: %8lu kB\n"
+ "KernelPageSize: %8lu kB\n"
+ "MMUPageSize: %8lu kB\n",
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
++#else
+ (vma->vm_end - vma->vm_start) >> 10,
++#endif
+ mss.resident >> 10,
+ (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
+ mss.shared_clean >> 10,
+diff -urNp linux-2.6.31.7/fs/proc/task_nommu.c linux-2.6.31.7/fs/proc/task_nommu.c
+--- linux-2.6.31.7/fs/proc/task_nommu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/proc/task_nommu.c 2009-12-08 17:39:44.208690287 -0500
+@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
+ else
+ bytes += kobjsize(mm);
+
+- if (current->fs && current->fs->users > 1)
++ if (current->fs && atomic_read(&current->fs->users) > 1)
+ sbytes += kobjsize(current->fs);
+ else
+ bytes += kobjsize(current->fs);
+@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
+ if (len < 1)
+ len = 1;
+ seq_printf(m, "%*c", len, ' ');
+- seq_path(m, &file->f_path, "");
++ seq_path(m, &file->f_path, "\n\\");
+ }
+
+ seq_putc(m, '\n');
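
In the task_mmu.c, nommu.c and task_nommu.c hunks above, the escape set passed to seq_path() changes from "" or "\n" to "\n\\". That last argument is the set of characters seq_path() octal-escapes in the emitted pathname, so escaping the newline (and the backslash, to keep the escaping itself unambiguous) prevents a crafted file name from injecting fake records into line-oriented output such as /proc/<pid>/maps. Illustrative call, as used in the hunks:

	/* With "\n\\" as the escape set, a mapping backed by a file whose
	 * name contains a newline is printed with "\012" in its place
	 * instead of spilling onto a forged second line. */
	seq_path(m, &file->f_path, "\n\\");
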
+diff -urNp linux-2.6.31.7/fs/readdir.c linux-2.6.31.7/fs/readdir.c
+--- linux-2.6.31.7/fs/readdir.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/readdir.c 2009-12-08 17:39:44.208690287 -0500
+@@ -16,6 +16,7 @@
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
+ #include <linux/unistd.h>
++#include <linux/namei.h>
+
+ #include <asm/uaccess.h>
+
+@@ -67,6 +68,7 @@ struct old_linux_dirent {
+
+ struct readdir_callback {
+ struct old_linux_dirent __user * dirent;
++ struct file * file;
+ int result;
+ };
+
+@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
+ buf->result = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ buf->result++;
+ dirent = buf->dirent;
+ if (!access_ok(VERIFY_WRITE, dirent,
+@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
+
+ buf.result = 0;
+ buf.dirent = dirent;
++ buf.file = file;
+
+ error = vfs_readdir(file, fillonedir, &buf);
+ if (buf.result)
+@@ -142,6 +149,7 @@ struct linux_dirent {
+ struct getdents_callback {
+ struct linux_dirent __user * current_dir;
+ struct linux_dirent __user * previous;
++ struct file * file;
+ int count;
+ int error;
+ };
+@@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
+ buf->error = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
++ buf.file = file;
+
+ error = vfs_readdir(file, filldir, &buf);
+ if (error >= 0)
+@@ -228,6 +241,7 @@ out:
+ struct getdents_callback64 {
+ struct linux_dirent64 __user * current_dir;
+ struct linux_dirent64 __user * previous;
++ struct file *file;
+ int count;
+ int error;
+ };
+@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
+
+ buf.current_dir = dirent;
+ buf.previous = NULL;
++ buf.file = file;
+ buf.count = count;
+ buf.error = 0;
+
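
The fs/readdir.c hunk above adds a struct file pointer to each of the three filldir callback buffers (old_readdir, getdents, getdents64) and fills it in before vfs_readdir(), so gr_acl_handle_filldir() can consult the open directory when deciding whether an entry is visible; returning 0 from the callback silently skips just that entry while the walk continues. A minimal sketch of the callback shape, with the grsecurity hook as the only assumption beyond stock 2.6.31:

	/* Sketch: a filldir-style callback that drops entries the policy
	 * hides instead of copying them to userspace. */
	struct example_getdents_buf {
		struct linux_dirent __user *current_dir;
		struct file *file;	/* added so the hook can see the dir */
		int count;
		int error;
	};

	static int example_filldir(void *__buf, const char *name, int namlen,
				   loff_t offset, u64 ino, unsigned int d_type)
	{
		struct example_getdents_buf *buf = __buf;

		if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
			return 0;	/* hide this entry, keep iterating */
		/* ...copy the dirent to userspace as the original code does... */
		return 0;
	}
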
+diff -urNp linux-2.6.31.7/fs/reiserfs/do_balan.c linux-2.6.31.7/fs/reiserfs/do_balan.c
+--- linux-2.6.31.7/fs/reiserfs/do_balan.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/reiserfs/do_balan.c 2009-12-08 17:39:44.215587607 -0500
+@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
+ return;
+ }
+
+- atomic_inc(&(fs_generation(tb->tb_sb)));
++ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
+ do_balance_starts(tb);
+
+ /* balance leaf returns 0 except if combining L R and S into
+diff -urNp linux-2.6.31.7/fs/reiserfs/item_ops.c linux-2.6.31.7/fs/reiserfs/item_ops.c
+--- linux-2.6.31.7/fs/reiserfs/item_ops.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/reiserfs/item_ops.c 2009-12-08 17:39:44.215587607 -0500
+@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
+ vi->vi_index, vi->vi_type, vi->vi_ih);
+ }
+
+-static struct item_operations stat_data_ops = {
++static const struct item_operations stat_data_ops = {
+ .bytes_number = sd_bytes_number,
+ .decrement_key = sd_decrement_key,
+ .is_left_mergeable = sd_is_left_mergeable,
+@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
+ vi->vi_index, vi->vi_type, vi->vi_ih);
+ }
+
+-static struct item_operations direct_ops = {
++static const struct item_operations direct_ops = {
+ .bytes_number = direct_bytes_number,
+ .decrement_key = direct_decrement_key,
+ .is_left_mergeable = direct_is_left_mergeable,
+@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
+ vi->vi_index, vi->vi_type, vi->vi_ih);
+ }
+
+-static struct item_operations indirect_ops = {
++static const struct item_operations indirect_ops = {
+ .bytes_number = indirect_bytes_number,
+ .decrement_key = indirect_decrement_key,
+ .is_left_mergeable = indirect_is_left_mergeable,
+@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
+ printk("\n");
+ }
+
+-static struct item_operations direntry_ops = {
++static const struct item_operations direntry_ops = {
+ .bytes_number = direntry_bytes_number,
+ .decrement_key = direntry_decrement_key,
+ .is_left_mergeable = direntry_is_left_mergeable,
+@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
+ "Invalid item type observed, run fsck ASAP");
+ }
+
+-static struct item_operations errcatch_ops = {
++static const struct item_operations errcatch_ops = {
+ errcatch_bytes_number,
+ errcatch_decrement_key,
+ errcatch_is_left_mergeable,
+@@ -746,7 +746,7 @@ static struct item_operations errcatch_o
+ #error Item types must use disk-format assigned values.
+ #endif
+
+-struct item_operations *item_ops[TYPE_ANY + 1] = {
++const struct item_operations *item_ops[TYPE_ANY + 1] = {
+ &stat_data_ops,
+ &indirect_ops,
+ &direct_ops,
+diff -urNp linux-2.6.31.7/fs/reiserfs/procfs.c linux-2.6.31.7/fs/reiserfs/procfs.c
+--- linux-2.6.31.7/fs/reiserfs/procfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/reiserfs/procfs.c 2009-12-08 17:39:44.216660903 -0500
+@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
+ "SMALL_TAILS " : "NO_TAILS ",
+ replay_only(sb) ? "REPLAY_ONLY " : "",
+ convert_reiserfs(sb) ? "CONV " : "",
+- atomic_read(&r->s_generation_counter),
++ atomic_read_unchecked(&r->s_generation_counter),
+ SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
+ SF(s_do_balance), SF(s_unneeded_left_neighbor),
+ SF(s_good_search_by_key_reada), SF(s_bmaps),
+diff -urNp linux-2.6.31.7/fs/romfs/super.c linux-2.6.31.7/fs/romfs/super.c
+--- linux-2.6.31.7/fs/romfs/super.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/romfs/super.c 2009-12-08 17:39:44.216660903 -0500
+@@ -284,7 +284,7 @@ static const struct file_operations romf
+ .readdir = romfs_readdir,
+ };
+
+-static struct inode_operations romfs_dir_inode_operations = {
++static const struct inode_operations romfs_dir_inode_operations = {
+ .lookup = romfs_lookup,
+ };
+
+diff -urNp linux-2.6.31.7/fs/select.c linux-2.6.31.7/fs/select.c
+--- linux-2.6.31.7/fs/select.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/select.c 2009-12-08 17:39:44.216660903 -0500
+@@ -19,6 +19,7 @@
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/poll.h>
++#include <linux/security.h>
+ #include <linux/personality.h> /* for STICKY_TIMEOUTS */
+ #include <linux/file.h>
+ #include <linux/fdtable.h>
+@@ -814,6 +815,7 @@ int do_sys_poll(struct pollfd __user *uf
+ struct poll_list *walk = head;
+ unsigned long todo = nfds;
+
++ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
+ if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
+ return -EINVAL;
+
+diff -urNp linux-2.6.31.7/fs/seq_file.c linux-2.6.31.7/fs/seq_file.c
+--- linux-2.6.31.7/fs/seq_file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/seq_file.c 2009-12-08 17:39:44.217655056 -0500
+@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
+ return 0;
+ }
+ if (!m->buf) {
+- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
++ m->size = PAGE_SIZE;
++ m->buf = kmalloc(m->size, GFP_KERNEL);
+ if (!m->buf)
+ return -ENOMEM;
+ }
+@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
+ Eoverflow:
+ m->op->stop(m, p);
+ kfree(m->buf);
+- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
++ m->size <<= 1;
++ m->buf = kmalloc(m->size, GFP_KERNEL);
+ return !m->buf ? -ENOMEM : -EAGAIN;
+ }
+
+@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
+ m->version = file->f_version;
+ /* grab buffer if we didn't have one */
+ if (!m->buf) {
+- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
++ m->size = PAGE_SIZE;
++ m->buf = kmalloc(m->size, GFP_KERNEL);
+ if (!m->buf)
+ goto Enomem;
+ }
+@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
+ goto Fill;
+ m->op->stop(m, p);
+ kfree(m->buf);
+- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
++ m->size <<= 1;
++ m->buf = kmalloc(m->size, GFP_KERNEL);
+ if (!m->buf)
+ goto Enomem;
+ m->count = 0;
+diff -urNp linux-2.6.31.7/fs/smbfs/symlink.c linux-2.6.31.7/fs/smbfs/symlink.c
+--- linux-2.6.31.7/fs/smbfs/symlink.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/smbfs/symlink.c 2009-12-08 17:39:44.217655056 -0500
+@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
+
+ static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ __putname(s);
+ }
+diff -urNp linux-2.6.31.7/fs/splice.c linux-2.6.31.7/fs/splice.c
+--- linux-2.6.31.7/fs/splice.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/splice.c 2009-12-08 17:39:44.218800964 -0500
+@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
+ pipe_lock(pipe);
+
+ for (;;) {
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
+ do_wakeup = 0;
+ }
+
+- pipe->waiting_writers++;
++ atomic_inc(&pipe->waiting_writers);
+ pipe_wait(pipe);
+- pipe->waiting_writers--;
++ atomic_dec(&pipe->waiting_writers);
+ }
+
+ pipe_unlock(pipe);
+@@ -529,7 +529,7 @@ static ssize_t kernel_readv(struct file
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
++ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
+ set_fs(old_fs);
+
+ return res;
+@@ -544,7 +544,7 @@ static ssize_t kernel_write(struct file
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- res = vfs_write(file, (const char __user *)buf, count, &pos);
++ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
+ set_fs(old_fs);
+
+ return res;
+@@ -586,7 +586,7 @@ ssize_t default_file_splice_read(struct
+ goto err;
+
+ this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
+- vec[i].iov_base = (void __user *) page_address(page);
++ vec[i].iov_base = (__force void __user *) page_address(page);
+ vec[i].iov_len = this_len;
+ pages[i] = page;
+ spd.nr_pages++;
+@@ -806,10 +806,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
+ int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
+ {
+ while (!pipe->nrbufs) {
+- if (!pipe->writers)
++ if (!atomic_read(&pipe->writers))
+ return 0;
+
+- if (!pipe->waiting_writers && sd->num_spliced)
++ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
+ return 0;
+
+ if (sd->flags & SPLICE_F_NONBLOCK)
+@@ -1152,7 +1152,7 @@ ssize_t splice_direct_to_actor(struct fi
+ * out of the pipe right after the splice_to_pipe(). So set
+ * PIPE_READERS appropriately.
+ */
+- pipe->readers = 1;
++ atomic_set(&pipe->readers, 1);
+
+ current->splice_pipe = pipe;
+ }
+@@ -1710,9 +1710,9 @@ static int ipipe_prep(struct pipe_inode_
+ ret = -ERESTARTSYS;
+ break;
+ }
+- if (!pipe->writers)
++ if (!atomic_read(&pipe->writers))
+ break;
+- if (!pipe->waiting_writers) {
++ if (!atomic_read(&pipe->waiting_writers)) {
+ if (flags & SPLICE_F_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+@@ -1744,7 +1744,7 @@ static int opipe_prep(struct pipe_inode_
+ pipe_lock(pipe);
+
+ while (pipe->nrbufs >= PIPE_BUFFERS) {
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ ret = -EPIPE;
+ break;
+@@ -1757,9 +1757,9 @@ static int opipe_prep(struct pipe_inode_
+ ret = -ERESTARTSYS;
+ break;
+ }
+- pipe->waiting_writers++;
++ atomic_inc(&pipe->waiting_writers);
+ pipe_wait(pipe);
+- pipe->waiting_writers--;
++ atomic_dec(&pipe->waiting_writers);
+ }
+
+ pipe_unlock(pipe);
+@@ -1795,14 +1795,14 @@ retry:
+ pipe_double_lock(ipipe, opipe);
+
+ do {
+- if (!opipe->readers) {
++ if (!atomic_read(&opipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+ break;
+ }
+
+- if (!ipipe->nrbufs && !ipipe->writers)
++ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
+ break;
+
+ /*
+@@ -1902,7 +1902,7 @@ static int link_pipe(struct pipe_inode_i
+ pipe_double_lock(ipipe, opipe);
+
+ do {
+- if (!opipe->readers) {
++ if (!atomic_read(&opipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -1947,7 +1947,7 @@ static int link_pipe(struct pipe_inode_i
+ * return EAGAIN if we have the potential of some data in the
+ * future, otherwise just return 0
+ */
+- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
++ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
+ ret = -EAGAIN;
+
+ pipe_unlock(ipipe);
+diff -urNp linux-2.6.31.7/fs/squashfs/super.c linux-2.6.31.7/fs/squashfs/super.c
+--- linux-2.6.31.7/fs/squashfs/super.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/squashfs/super.c 2009-12-08 17:39:44.218800964 -0500
+@@ -44,7 +44,7 @@
+ #include "squashfs.h"
+
+ static struct file_system_type squashfs_fs_type;
+-static struct super_operations squashfs_super_ops;
++static const struct super_operations squashfs_super_ops;
+
+ static int supported_squashfs_filesystem(short major, short minor, short comp)
+ {
+@@ -444,7 +444,7 @@ static struct file_system_type squashfs_
+ .fs_flags = FS_REQUIRES_DEV
+ };
+
+-static struct super_operations squashfs_super_ops = {
++static const struct super_operations squashfs_super_ops = {
+ .alloc_inode = squashfs_alloc_inode,
+ .destroy_inode = squashfs_destroy_inode,
+ .statfs = squashfs_statfs,
+diff -urNp linux-2.6.31.7/fs/sysfs/bin.c linux-2.6.31.7/fs/sysfs/bin.c
+--- linux-2.6.31.7/fs/sysfs/bin.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/sysfs/bin.c 2009-12-08 17:39:44.218800964 -0500
+@@ -40,7 +40,7 @@ struct bin_buffer {
+ struct mutex mutex;
+ void *buffer;
+ int mmapped;
+- struct vm_operations_struct *vm_ops;
++ const struct vm_operations_struct *vm_ops;
+ struct file *file;
+ struct hlist_node list;
+ };
+@@ -331,7 +331,7 @@ static int bin_migrate(struct vm_area_st
+ }
+ #endif
+
+-static struct vm_operations_struct bin_vm_ops = {
++static const struct vm_operations_struct bin_vm_ops = {
+ .open = bin_vma_open,
+ .close = bin_vma_close,
+ .fault = bin_fault,
+diff -urNp linux-2.6.31.7/fs/sysfs/file.c linux-2.6.31.7/fs/sysfs/file.c
+--- linux-2.6.31.7/fs/sysfs/file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/sysfs/file.c 2009-12-08 17:39:44.219738480 -0500
+@@ -53,7 +53,7 @@ struct sysfs_buffer {
+ size_t count;
+ loff_t pos;
+ char * page;
+- struct sysfs_ops * ops;
++ const struct sysfs_ops * ops;
+ struct mutex mutex;
+ int needs_read_fill;
+ int event;
+@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
+ {
+ struct sysfs_dirent *attr_sd = dentry->d_fsdata;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
+- struct sysfs_ops * ops = buffer->ops;
++ const struct sysfs_ops * ops = buffer->ops;
+ int ret = 0;
+ ssize_t count;
+
+@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
+ {
+ struct sysfs_dirent *attr_sd = dentry->d_fsdata;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
+- struct sysfs_ops * ops = buffer->ops;
++ const struct sysfs_ops * ops = buffer->ops;
+ int rc;
+
+ /* need attr_sd for attr and ops, its parent for kobj */
+@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
+ struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
+ struct sysfs_buffer *buffer;
+- struct sysfs_ops *ops;
++ const struct sysfs_ops *ops;
+ int error = -EACCES;
+ char *p;
+
+diff -urNp linux-2.6.31.7/fs/sysfs/symlink.c linux-2.6.31.7/fs/sysfs/symlink.c
+--- linux-2.6.31.7/fs/sysfs/symlink.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/sysfs/symlink.c 2009-12-08 17:39:44.220810792 -0500
+@@ -203,7 +203,7 @@ static void *sysfs_follow_link(struct de
+
+ static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+ {
+- char *page = nd_get_link(nd);
++ const char *page = nd_get_link(nd);
+ if (!IS_ERR(page))
+ free_page((unsigned long)page);
+ }
+diff -urNp linux-2.6.31.7/fs/ubifs/file.c linux-2.6.31.7/fs/ubifs/file.c
+--- linux-2.6.31.7/fs/ubifs/file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/ubifs/file.c 2009-12-08 17:39:44.220810792 -0500
+@@ -1536,7 +1536,7 @@ out_unlock:
+ return err;
+ }
+
+-static struct vm_operations_struct ubifs_file_vm_ops = {
++static const struct vm_operations_struct ubifs_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = ubifs_vm_page_mkwrite,
+ };
+diff -urNp linux-2.6.31.7/fs/udf/balloc.c linux-2.6.31.7/fs/udf/balloc.c
+--- linux-2.6.31.7/fs/udf/balloc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/udf/balloc.c 2009-12-08 17:39:44.221805287 -0500
+@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
+
+ mutex_lock(&sbi->s_alloc_mutex);
+ partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
+- if (bloc->logicalBlockNum < 0 ||
+- (bloc->logicalBlockNum + count) >
+- partmap->s_partition_len) {
++ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
+ udf_debug("%d < %d || %d + %d > %d\n",
+ bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
+ count, partmap->s_partition_len);
+@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
+
+ mutex_lock(&sbi->s_alloc_mutex);
+ partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
+- if (bloc->logicalBlockNum < 0 ||
+- (bloc->logicalBlockNum + count) >
+- partmap->s_partition_len) {
++ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
+ udf_debug("%d < %d || %d + %d > %d\n",
+ bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
+ partmap->s_partition_len);
+diff -urNp linux-2.6.31.7/fs/utimes.c linux-2.6.31.7/fs/utimes.c
+--- linux-2.6.31.7/fs/utimes.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/utimes.c 2009-12-08 17:39:44.221805287 -0500
+@@ -1,6 +1,7 @@
+ #include <linux/compiler.h>
+ #include <linux/file.h>
+ #include <linux/fs.h>
++#include <linux/security.h>
+ #include <linux/linkage.h>
+ #include <linux/mount.h>
+ #include <linux/namei.h>
+@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
+ goto mnt_drop_write_and_out;
+ }
+ }
++
++ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
++ error = -EACCES;
++ goto mnt_drop_write_and_out;
++ }
++
+ mutex_lock(&inode->i_mutex);
+ error = notify_change(path->dentry, &newattrs);
+ mutex_unlock(&inode->i_mutex);
+diff -urNp linux-2.6.31.7/fs/xfs/linux-2.6/xfs_file.c linux-2.6.31.7/fs/xfs/linux-2.6/xfs_file.c
+--- linux-2.6.31.7/fs/xfs/linux-2.6/xfs_file.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/xfs/linux-2.6/xfs_file.c 2009-12-08 17:39:44.221805287 -0500
+@@ -42,7 +42,7 @@
+
+ #include <linux/dcache.h>
+
+-static struct vm_operations_struct xfs_file_vm_ops;
++static const struct vm_operations_struct xfs_file_vm_ops;
+
+ STATIC ssize_t
+ xfs_file_aio_read(
+@@ -271,7 +271,7 @@ const struct file_operations xfs_dir_fil
+ .fsync = xfs_file_fsync,
+ };
+
+-static struct vm_operations_struct xfs_file_vm_ops = {
++static const struct vm_operations_struct xfs_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = xfs_vm_page_mkwrite,
+ };
+diff -urNp linux-2.6.31.7/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.31.7/fs/xfs/linux-2.6/xfs_iops.c
+--- linux-2.6.31.7/fs/xfs/linux-2.6/xfs_iops.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/xfs/linux-2.6/xfs_iops.c 2009-12-08 17:39:44.221805287 -0500
+@@ -478,7 +478,7 @@ xfs_vn_put_link(
+ struct nameidata *nd,
+ void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+
+ if (!IS_ERR(s))
+ kfree(s);
+diff -urNp linux-2.6.31.7/fs/xfs/linux-2.6/xfs_super.c linux-2.6.31.7/fs/xfs/linux-2.6/xfs_super.c
+--- linux-2.6.31.7/fs/xfs/linux-2.6/xfs_super.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/xfs/linux-2.6/xfs_super.c 2009-12-08 17:39:44.222806527 -0500
+@@ -67,7 +67,7 @@
+ #include <linux/freezer.h>
+ #include <linux/parser.h>
+
+-static struct super_operations xfs_super_operations;
++static const struct super_operations xfs_super_operations;
+ static kmem_zone_t *xfs_ioend_zone;
+ mempool_t *xfs_ioend_pool;
+
+@@ -1532,7 +1532,7 @@ xfs_fs_get_sb(
+ mnt);
+ }
+
+-static struct super_operations xfs_super_operations = {
++static const struct super_operations xfs_super_operations = {
+ .alloc_inode = xfs_fs_alloc_inode,
+ .destroy_inode = xfs_fs_destroy_inode,
+ .write_inode = xfs_fs_write_inode,
+diff -urNp linux-2.6.31.7/fs/xfs/xfs_bmap.c linux-2.6.31.7/fs/xfs/xfs_bmap.c
+--- linux-2.6.31.7/fs/xfs/xfs_bmap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/fs/xfs/xfs_bmap.c 2009-12-08 17:39:44.223804522 -0500
+@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
+ int nmap,
+ int ret_nmap);
+ #else
+-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
++#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
+ #endif /* DEBUG */
+
+ #if defined(XFS_RW_TRACE)
+diff -urNp linux-2.6.31.7/grsecurity/gracl_alloc.c linux-2.6.31.7/grsecurity/gracl_alloc.c
+--- linux-2.6.31.7/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/gracl_alloc.c 2009-12-08 17:39:44.226685094 -0500
+@@ -0,0 +1,105 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++
++static unsigned long alloc_stack_next = 1;
++static unsigned long alloc_stack_size = 1;
++static void **alloc_stack;
++
++static __inline__ int
++alloc_pop(void)
++{
++ if (alloc_stack_next == 1)
++ return 0;
++
++ kfree(alloc_stack[alloc_stack_next - 2]);
++
++ alloc_stack_next--;
++
++ return 1;
++}
++
++static __inline__ int
++alloc_push(void *buf)
++{
++ if (alloc_stack_next >= alloc_stack_size)
++ return 1;
++
++ alloc_stack[alloc_stack_next - 1] = buf;
++
++ alloc_stack_next++;
++
++ return 0;
++}
++
++void *
++acl_alloc(unsigned long len)
++{
++ void *ret = NULL;
++
++ if (!len || len > PAGE_SIZE)
++ goto out;
++
++ ret = kmalloc(len, GFP_KERNEL);
++
++ if (ret) {
++ if (alloc_push(ret)) {
++ kfree(ret);
++ ret = NULL;
++ }
++ }
++
++out:
++ return ret;
++}
++
++void *
++acl_alloc_num(unsigned long num, unsigned long len)
++{
++ if (!len || (num > (PAGE_SIZE / len)))
++ return NULL;
++
++ return acl_alloc(num * len);
++}
++
++void
++acl_free_all(void)
++{
++ if (gr_acl_is_enabled() || !alloc_stack)
++ return;
++
++ while (alloc_pop()) ;
++
++ if (alloc_stack) {
++ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
++ kfree(alloc_stack);
++ else
++ vfree(alloc_stack);
++ }
++
++ alloc_stack = NULL;
++ alloc_stack_size = 1;
++ alloc_stack_next = 1;
++
++ return;
++}
++
++int
++acl_alloc_stack_init(unsigned long size)
++{
++ if ((size * sizeof (void *)) <= PAGE_SIZE)
++ alloc_stack =
++ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
++ else
++ alloc_stack = (void **) vmalloc(size * sizeof (void *));
++
++ alloc_stack_size = size;
++
++ if (!alloc_stack)
++ return 0;
++ else
++ return 1;
++}
+diff -urNp linux-2.6.31.7/grsecurity/gracl.c linux-2.6.31.7/grsecurity/gracl.c
+--- linux-2.6.31.7/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/gracl.c 2009-12-08 17:39:44.226685094 -0500
+@@ -0,0 +1,3912 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/mount.h>
++#include <linux/tty.h>
++#include <linux/proc_fs.h>
++#include <linux/smp_lock.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/sysctl.h>
++#include <linux/netdevice.h>
++#include <linux/ptrace.h>
++#include <linux/gracl.h>
++#include <linux/gralloc.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/pid_namespace.h>
++#include <linux/fdtable.h>
++#include <linux/percpu.h>
++
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++
++static struct acl_role_db acl_role_set;
++static struct name_db name_set;
++static struct inodev_db inodev_set;
++
++/* for keeping track of userspace pointers used for subjects, so we
++ can share references in the kernel as well
++*/
++
++static struct dentry *real_root;
++static struct vfsmount *real_root_mnt;
++
++static struct acl_subj_map_db subj_map_set;
++
++static struct acl_role_label *default_role;
++
++static struct acl_role_label *role_list;
++
++static u16 acl_sp_role_value;
++
++extern char *gr_shared_page[4];
++static DECLARE_MUTEX(gr_dev_sem);
++DEFINE_RWLOCK(gr_inode_lock);
++
++struct gr_arg *gr_usermode;
++
++static unsigned int gr_status __read_only = GR_STATUS_INIT;
++
++extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
++extern void gr_clear_learn_entries(void);
++
++#ifdef CONFIG_GRKERNSEC_RESLOG
++extern void gr_log_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt);
++#endif
++
++unsigned char *gr_system_salt;
++unsigned char *gr_system_sum;
++
++static struct sprole_pw **acl_special_roles = NULL;
++static __u16 num_sprole_pws = 0;
++
++static struct acl_role_label *kernel_role = NULL;
++
++static unsigned int gr_auth_attempts = 0;
++static unsigned long gr_auth_expires = 0UL;
++
++extern struct vfsmount *sock_mnt;
++extern struct vfsmount *pipe_mnt;
++extern struct vfsmount *shm_mnt;
++static struct acl_object_label *fakefs_obj;
++
++extern int gr_init_uidset(void);
++extern void gr_free_uidset(void);
++extern void gr_remove_uid(uid_t uid);
++extern int gr_find_uid(uid_t uid);
++
++__inline__ int
++gr_acl_is_enabled(void)
++{
++ return (gr_status & GR_READY);
++}
++
++char gr_roletype_to_char(void)
++{
++ switch (current->role->roletype &
++ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
++ GR_ROLE_SPECIAL)) {
++ case GR_ROLE_DEFAULT:
++ return 'D';
++ case GR_ROLE_USER:
++ return 'U';
++ case GR_ROLE_GROUP:
++ return 'G';
++ case GR_ROLE_SPECIAL:
++ return 'S';
++ }
++
++ return 'X';
++}
++
++__inline__ int
++gr_acl_tpe_check(void)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++ if (current->role->roletype & GR_ROLE_TPE)
++ return 1;
++ else
++ return 0;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (inode && S_ISBLK(inode->i_mode) &&
++ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
++ !capable(CAP_SYS_RAWIO))
++ return 1;
++#endif
++ return 0;
++}
++
++static int
++gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
++{
++ int i;
++ unsigned long *l1;
++ unsigned long *l2;
++ unsigned char *c1;
++ unsigned char *c2;
++ int num_longs;
++
++ if (likely(lena != lenb))
++ return 0;
++
++ l1 = (unsigned long *)a;
++ l2 = (unsigned long *)b;
++
++ num_longs = lena / sizeof(unsigned long);
++
++ for (i = num_longs; i--; l1++, l2++) {
++ if (unlikely(*l1 != *l2))
++ return 0;
++ }
++
++ c1 = (unsigned char *) l1;
++ c2 = (unsigned char *) l2;
++
++ i = lena - (num_longs * sizeof(unsigned long));
++
++ for (; i--; c1++, c2++) {
++ if (unlikely(*c1 != *c2))
++ return 0;
++ }
++
++ return 1;
++}
++
++static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
++ struct dentry *root, struct vfsmount *rootmnt,
++ char *buffer, int buflen)
++{
++ char * end = buffer+buflen;
++ char * retval;
++ int namelen;
++
++ *--end = '\0';
++ buflen--;
++
++ if (buflen < 1)
++ goto Elong;
++ /* Get '/' right */
++ retval = end-1;
++ *retval = '/';
++
++ for (;;) {
++ struct dentry * parent;
++
++ if (dentry == root && vfsmnt == rootmnt)
++ break;
++ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
++ /* Global root? */
++ spin_lock(&vfsmount_lock);
++ if (vfsmnt->mnt_parent == vfsmnt) {
++ spin_unlock(&vfsmount_lock);
++ goto global_root;
++ }
++ dentry = vfsmnt->mnt_mountpoint;
++ vfsmnt = vfsmnt->mnt_parent;
++ spin_unlock(&vfsmount_lock);
++ continue;
++ }
++ parent = dentry->d_parent;
++ prefetch(parent);
++ namelen = dentry->d_name.len;
++ buflen -= namelen + 1;
++ if (buflen < 0)
++ goto Elong;
++ end -= namelen;
++ memcpy(end, dentry->d_name.name, namelen);
++ *--end = '/';
++ retval = end;
++ dentry = parent;
++ }
++
++ return retval;
++
++global_root:
++ namelen = dentry->d_name.len;
++ buflen -= namelen;
++ if (buflen < 0)
++ goto Elong;
++ retval -= namelen-1; /* hit the slash */
++ memcpy(retval, dentry->d_name.name, namelen);
++ return retval;
++Elong:
++ return ERR_PTR(-ENAMETOOLONG);
++}
++
++static char *
++gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
++ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
++{
++ char *retval;
++
++ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
++ if (unlikely(IS_ERR(retval)))
++ retval = strcpy(buf, "<path too long>");
++ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
++ retval[1] = '\0';
++
++ return retval;
++}
++
++static char *
++__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++ char *buf, int buflen)
++{
++ char *res;
++
++ /* we can use real_root, real_root_mnt, because this is only called
++ by the RBAC system */
++ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
++
++ return res;
++}
++
++static char *
++d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++ char *buf, int buflen)
++{
++ char *res;
++ struct dentry *root;
++ struct vfsmount *rootmnt;
++ struct task_struct *reaper = &init_task;
++
++ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
++ read_lock(&reaper->fs->lock);
++ root = dget(reaper->fs->root.dentry);
++ rootmnt = mntget(reaper->fs->root.mnt);
++ read_unlock(&reaper->fs->lock);
++
++ spin_lock(&dcache_lock);
++ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
++ spin_unlock(&dcache_lock);
++
++ dput(root);
++ mntput(rootmnt);
++ return res;
++}
++
++static char *
++gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ char *ret;
++ spin_lock(&dcache_lock);
++ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
++ PAGE_SIZE);
++ spin_unlock(&dcache_lock);
++ return ret;
++}
++
++char *
++gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++__inline__ __u32
++to_gr_audit(const __u32 reqmode)
++{
++ /* masks off auditable permission flags, then shifts them to create
++ auditing flags, and adds the special case of append auditing if
++ we're requesting write */
++ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
++}
++
++struct acl_subject_label *
++lookup_subject_map(const struct acl_subject_label *userp)
++{
++ unsigned int index = shash(userp, subj_map_set.s_size);
++ struct subject_map *match;
++
++ match = subj_map_set.s_hash[index];
++
++ while (match && match->user != userp)
++ match = match->next;
++
++ if (match != NULL)
++ return match->kernel;
++ else
++ return NULL;
++}
++
++static void
++insert_subj_map_entry(struct subject_map *subjmap)
++{
++ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
++ struct subject_map **curr;
++
++ subjmap->prev = NULL;
++
++ curr = &subj_map_set.s_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = subjmap;
++
++ subjmap->next = *curr;
++ *curr = subjmap;
++
++ return;
++}
++
++static struct acl_role_label *
++lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
++ const gid_t gid)
++{
++ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
++ struct acl_role_label *match;
++ struct role_allowed_ip *ipp;
++ unsigned int x;
++
++ match = acl_role_set.r_hash[index];
++
++ while (match) {
++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
++ for (x = 0; x < match->domain_child_num; x++) {
++ if (match->domain_children[x] == uid)
++ goto found;
++ }
++ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
++ break;
++ match = match->next;
++ }
++found:
++ if (match == NULL) {
++ try_group:
++ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
++ match = acl_role_set.r_hash[index];
++
++ while (match) {
++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
++ for (x = 0; x < match->domain_child_num; x++) {
++ if (match->domain_children[x] == gid)
++ goto found2;
++ }
++ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
++ break;
++ match = match->next;
++ }
++found2:
++ if (match == NULL)
++ match = default_role;
++ if (match->allowed_ips == NULL)
++ return match;
++ else {
++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++ if (likely
++ ((ntohl(task->signal->curr_ip) & ipp->netmask) ==
++ (ntohl(ipp->addr) & ipp->netmask)))
++ return match;
++ }
++ match = default_role;
++ }
++ } else if (match->allowed_ips == NULL) {
++ return match;
++ } else {
++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++ if (likely
++ ((ntohl(task->signal->curr_ip) & ipp->netmask) ==
++ (ntohl(ipp->addr) & ipp->netmask)))
++ return match;
++ }
++ goto try_group;
++ }
++
++ return match;
++}
++
++struct acl_subject_label *
++lookup_acl_subj_label(const ino_t ino, const dev_t dev,
++ const struct acl_role_label *role)
++{
++ unsigned int index = fhash(ino, dev, role->subj_hash_size);
++ struct acl_subject_label *match;
++
++ match = role->subj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++struct acl_subject_label *
++lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
++ const struct acl_role_label *role)
++{
++ unsigned int index = fhash(ino, dev, role->subj_hash_size);
++ struct acl_subject_label *match;
++
++ match = role->subj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ !(match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && (match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static struct acl_object_label *
++lookup_acl_obj_label(const ino_t ino, const dev_t dev,
++ const struct acl_subject_label *subj)
++{
++ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
++ struct acl_object_label *match;
++
++ match = subj->obj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static struct acl_object_label *
++lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
++ const struct acl_subject_label *subj)
++{
++ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
++ struct acl_object_label *match;
++
++ match = subj->obj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ !(match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && (match->mode & GR_DELETED))
++ return match;
++
++ match = subj->obj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static struct name_entry *
++lookup_name_entry(const char *name)
++{
++ unsigned int len = strlen(name);
++ unsigned int key = full_name_hash(name, len);
++ unsigned int index = key % name_set.n_size;
++ struct name_entry *match;
++
++ match = name_set.n_hash[index];
++
++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
++ match = match->next;
++
++ return match;
++}
++
++static struct name_entry *
++lookup_name_entry_create(const char *name)
++{
++ unsigned int len = strlen(name);
++ unsigned int key = full_name_hash(name, len);
++ unsigned int index = key % name_set.n_size;
++ struct name_entry *match;
++
++ match = name_set.n_hash[index];
++
++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
++ !match->deleted))
++ match = match->next;
++
++ if (match && match->deleted)
++ return match;
++
++ match = name_set.n_hash[index];
++
++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
++ match->deleted))
++ match = match->next;
++
++ if (match && !match->deleted)
++ return match;
++ else
++ return NULL;
++}
++
++static struct inodev_entry *
++lookup_inodev_entry(const ino_t ino, const dev_t dev)
++{
++ unsigned int index = fhash(ino, dev, inodev_set.i_size);
++ struct inodev_entry *match;
++
++ match = inodev_set.i_hash[index];
++
++ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
++ match = match->next;
++
++ return match;
++}
++
++static void
++insert_inodev_entry(struct inodev_entry *entry)
++{
++ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
++ inodev_set.i_size);
++ struct inodev_entry **curr;
++
++ entry->prev = NULL;
++
++ curr = &inodev_set.i_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = entry;
++
++ entry->next = *curr;
++ *curr = entry;
++
++ return;
++}
++
++static void
++__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
++{
++ unsigned int index =
++ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
++ struct acl_role_label **curr;
++ struct acl_role_label *tmp;
++
++ curr = &acl_role_set.r_hash[index];
++
++ /* if role was already inserted due to domains and already has
++ a role in the same bucket as it attached, then we need to
++ combine these two buckets
++ */
++ if (role->next) {
++ tmp = role->next;
++ while (tmp->next)
++ tmp = tmp->next;
++ tmp->next = *curr;
++ } else
++ role->next = *curr;
++ *curr = role;
++
++ return;
++}
++
++static void
++insert_acl_role_label(struct acl_role_label *role)
++{
++ int i;
++
++ if (role_list == NULL) {
++ role_list = role;
++ role->prev = NULL;
++ } else {
++ role->prev = role_list;
++ role_list = role;
++ }
++
++ /* used for hash chains */
++ role->next = NULL;
++
++ if (role->roletype & GR_ROLE_DOMAIN) {
++ for (i = 0; i < role->domain_child_num; i++)
++ __insert_acl_role_label(role, role->domain_children[i]);
++ } else
++ __insert_acl_role_label(role, role->uidgid);
++}
++
++static int
++insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
++{
++ struct name_entry **curr, *nentry;
++ struct inodev_entry *ientry;
++ unsigned int len = strlen(name);
++ unsigned int key = full_name_hash(name, len);
++ unsigned int index = key % name_set.n_size;
++
++ curr = &name_set.n_hash[index];
++
++ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
++ curr = &((*curr)->next);
++
++ if (*curr != NULL)
++ return 1;
++
++ nentry = acl_alloc(sizeof (struct name_entry));
++ if (nentry == NULL)
++ return 0;
++ ientry = acl_alloc(sizeof (struct inodev_entry));
++ if (ientry == NULL)
++ return 0;
++ ientry->nentry = nentry;
++
++ nentry->key = key;
++ nentry->name = name;
++ nentry->inode = inode;
++ nentry->device = device;
++ nentry->len = len;
++ nentry->deleted = deleted;
++
++ nentry->prev = NULL;
++ curr = &name_set.n_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = nentry;
++ nentry->next = *curr;
++ *curr = nentry;
++
++ /* insert us into the table searchable by inode/dev */
++ insert_inodev_entry(ientry);
++
++ return 1;
++}
++
++static void
++insert_acl_obj_label(struct acl_object_label *obj,
++ struct acl_subject_label *subj)
++{
++ unsigned int index =
++ fhash(obj->inode, obj->device, subj->obj_hash_size);
++ struct acl_object_label **curr;
++
++
++ obj->prev = NULL;
++
++ curr = &subj->obj_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = obj;
++
++ obj->next = *curr;
++ *curr = obj;
++
++ return;
++}
++
++static void
++insert_acl_subj_label(struct acl_subject_label *obj,
++ struct acl_role_label *role)
++{
++ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
++ struct acl_subject_label **curr;
++
++ obj->prev = NULL;
++
++ curr = &role->subj_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = obj;
++
++ obj->next = *curr;
++ *curr = obj;
++
++ return;
++}
++
++/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
++
++static void *
++create_table(__u32 * len, int elementsize)
++{
++ unsigned int table_sizes[] = {
++ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
++ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
++ 4194301, 8388593, 16777213, 33554393, 67108859
++ };
++ void *newtable = NULL;
++ unsigned int pwr = 0;
++
++ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
++ table_sizes[pwr] <= *len)
++ pwr++;
++
++ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
++ return newtable;
++
++ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
++ newtable =
++ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
++ else
++ newtable = vmalloc(table_sizes[pwr] * elementsize);
++
++ *len = table_sizes[pwr];
++
++ return newtable;
++}
++
++static int
++init_variables(const struct gr_arg *arg)
++{
++ struct task_struct *reaper = &init_task;
++ unsigned int stacksize;
++
++ subj_map_set.s_size = arg->role_db.num_subjects;
++ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
++ name_set.n_size = arg->role_db.num_objects;
++ inodev_set.i_size = arg->role_db.num_objects;
++
++ if (!subj_map_set.s_size || !acl_role_set.r_size ||
++ !name_set.n_size || !inodev_set.i_size)
++ return 1;
++
++ if (!gr_init_uidset())
++ return 1;
++
++ /* set up the stack that holds allocation info */
++
++ stacksize = arg->role_db.num_pointers + 5;
++
++ if (!acl_alloc_stack_init(stacksize))
++ return 1;
++
++ /* grab reference for the real root dentry and vfsmount */
++ read_lock(&reaper->fs->lock);
++ real_root_mnt = mntget(reaper->fs->root.mnt);
++ real_root = dget(reaper->fs->root.dentry);
++ read_unlock(&reaper->fs->lock);
++
++ fakefs_obj = acl_alloc(sizeof(struct acl_object_label));
++ if (fakefs_obj == NULL)
++ return 1;
++ fakefs_obj->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
++
++ subj_map_set.s_hash =
++ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
++ acl_role_set.r_hash =
++ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
++ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
++ inodev_set.i_hash =
++ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
++
++ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
++ !name_set.n_hash || !inodev_set.i_hash)
++ return 1;
++
++ memset(subj_map_set.s_hash, 0,
++ sizeof(struct subject_map *) * subj_map_set.s_size);
++ memset(acl_role_set.r_hash, 0,
++ sizeof (struct acl_role_label *) * acl_role_set.r_size);
++ memset(name_set.n_hash, 0,
++ sizeof (struct name_entry *) * name_set.n_size);
++ memset(inodev_set.i_hash, 0,
++ sizeof (struct inodev_entry *) * inodev_set.i_size);
++
++ return 0;
++}
++
++/* free information not needed after startup
++ currently contains user->kernel pointer mappings for subjects
++*/
++
++static void
++free_init_variables(void)
++{
++ __u32 i;
++
++ if (subj_map_set.s_hash) {
++ for (i = 0; i < subj_map_set.s_size; i++) {
++ if (subj_map_set.s_hash[i]) {
++ kfree(subj_map_set.s_hash[i]);
++ subj_map_set.s_hash[i] = NULL;
++ }
++ }
++
++ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
++ PAGE_SIZE)
++ kfree(subj_map_set.s_hash);
++ else
++ vfree(subj_map_set.s_hash);
++ }
++
++ return;
++}
++
++static void
++free_variables(void)
++{
++ struct acl_subject_label *s;
++ struct acl_role_label *r;
++ struct task_struct *task, *task2;
++ unsigned int x;
++
++ gr_clear_learn_entries();
++
++ read_lock(&tasklist_lock);
++ do_each_thread(task2, task) {
++ task->acl_sp_role = 0;
++ task->acl_role_id = 0;
++ task->acl = NULL;
++ task->role = NULL;
++ } while_each_thread(task2, task);
++ read_unlock(&tasklist_lock);
++
++ /* release the reference to the real root dentry and vfsmount */
++ if (real_root)
++ dput(real_root);
++ real_root = NULL;
++ if (real_root_mnt)
++ mntput(real_root_mnt);
++ real_root_mnt = NULL;
++
++ /* free all object hash tables */
++
++ FOR_EACH_ROLE_START(r)
++ if (r->subj_hash == NULL)
++ goto next_role;
++ FOR_EACH_SUBJECT_START(r, s, x)
++ if (s->obj_hash == NULL)
++ break;
++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
++ kfree(s->obj_hash);
++ else
++ vfree(s->obj_hash);
++ FOR_EACH_SUBJECT_END(s, x)
++ FOR_EACH_NESTED_SUBJECT_START(r, s)
++ if (s->obj_hash == NULL)
++ break;
++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
++ kfree(s->obj_hash);
++ else
++ vfree(s->obj_hash);
++ FOR_EACH_NESTED_SUBJECT_END(s)
++ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
++ kfree(r->subj_hash);
++ else
++ vfree(r->subj_hash);
++ r->subj_hash = NULL;
++next_role:
++ FOR_EACH_ROLE_END(r)
++
++ acl_free_all();
++
++ if (acl_role_set.r_hash) {
++ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
++ PAGE_SIZE)
++ kfree(acl_role_set.r_hash);
++ else
++ vfree(acl_role_set.r_hash);
++ }
++ if (name_set.n_hash) {
++ if ((name_set.n_size * sizeof (struct name_entry *)) <=
++ PAGE_SIZE)
++ kfree(name_set.n_hash);
++ else
++ vfree(name_set.n_hash);
++ }
++
++ if (inodev_set.i_hash) {
++ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
++ PAGE_SIZE)
++ kfree(inodev_set.i_hash);
++ else
++ vfree(inodev_set.i_hash);
++ }
++
++ gr_free_uidset();
++
++ memset(&name_set, 0, sizeof (struct name_db));
++ memset(&inodev_set, 0, sizeof (struct inodev_db));
++ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
++ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
++
++ default_role = NULL;
++ role_list = NULL;
++
++ return;
++}
++
++static __u32
++count_user_objs(struct acl_object_label *userp)
++{
++ struct acl_object_label o_tmp;
++ __u32 num = 0;
++
++ while (userp) {
++ if (copy_from_user(&o_tmp, userp,
++ sizeof (struct acl_object_label)))
++ break;
++
++ userp = o_tmp.prev;
++ num++;
++ }
++
++ return num;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
++
++static int
++copy_user_glob(struct acl_object_label *obj)
++{
++ struct acl_object_label *g_tmp, **guser;
++ unsigned int len;
++ char *tmp;
++
++ if (obj->globbed == NULL)
++ return 0;
++
++ guser = &obj->globbed;
++ while (*guser) {
++ g_tmp = (struct acl_object_label *)
++ acl_alloc(sizeof (struct acl_object_label));
++ if (g_tmp == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(g_tmp, *guser,
++ sizeof (struct acl_object_label)))
++ return -EFAULT;
++
++ len = strnlen_user(g_tmp->filename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, g_tmp->filename, len))
++ return -EFAULT;
++ tmp[len-1] = '\0';
++ g_tmp->filename = tmp;
++
++ *guser = g_tmp;
++ guser = &(g_tmp->next);
++ }
++
++ return 0;
++}
++
++static int
++copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
++ struct acl_role_label *role)
++{
++ struct acl_object_label *o_tmp;
++ unsigned int len;
++ int ret;
++ char *tmp;
++
++ while (userp) {
++ if ((o_tmp = (struct acl_object_label *)
++ acl_alloc(sizeof (struct acl_object_label))) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(o_tmp, userp,
++ sizeof (struct acl_object_label)))
++ return -EFAULT;
++
++ userp = o_tmp->prev;
++
++ len = strnlen_user(o_tmp->filename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, o_tmp->filename, len))
++ return -EFAULT;
++ tmp[len-1] = '\0';
++ o_tmp->filename = tmp;
++
++ insert_acl_obj_label(o_tmp, subj);
++ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
++ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
++ return -ENOMEM;
++
++ ret = copy_user_glob(o_tmp);
++ if (ret)
++ return ret;
++
++ if (o_tmp->nested) {
++ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
++ if (IS_ERR(o_tmp->nested))
++ return PTR_ERR(o_tmp->nested);
++
++ /* insert into nested subject list */
++ o_tmp->nested->next = role->hash->first;
++ role->hash->first = o_tmp->nested;
++ }
++ }
++
++ return 0;
++}
++
++static __u32
++count_user_subjs(struct acl_subject_label *userp)
++{
++ struct acl_subject_label s_tmp;
++ __u32 num = 0;
++
++ while (userp) {
++ if (copy_from_user(&s_tmp, userp,
++ sizeof (struct acl_subject_label)))
++ break;
++
++ userp = s_tmp.prev;
++ /* do not count nested subjects against this count, since
++ they are not included in the hash table, but are
++ attached to objects. We have already counted
++ the subjects in userspace for the allocation
++ stack
++ */
++ if (!(s_tmp.mode & GR_NESTED))
++ num++;
++ }
++
++ return num;
++}
++
++static int
++copy_user_allowedips(struct acl_role_label *rolep)
++{
++ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
++
++ ruserip = rolep->allowed_ips;
++
++ while (ruserip) {
++ rlast = rtmp;
++
++ if ((rtmp = (struct role_allowed_ip *)
++ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(rtmp, ruserip,
++ sizeof (struct role_allowed_ip)))
++ return -EFAULT;
++
++ ruserip = rtmp->prev;
++
++ if (!rlast) {
++ rtmp->prev = NULL;
++ rolep->allowed_ips = rtmp;
++ } else {
++ rlast->next = rtmp;
++ rtmp->prev = rlast;
++ }
++
++ if (!ruserip)
++ rtmp->next = NULL;
++ }
++
++ return 0;
++}
++
++static int
++copy_user_transitions(struct acl_role_label *rolep)
++{
++ struct role_transition *rusertp, *rtmp = NULL, *rlast;
++
++ unsigned int len;
++ char *tmp;
++
++ rusertp = rolep->transitions;
++
++ while (rusertp) {
++ rlast = rtmp;
++
++ if ((rtmp = (struct role_transition *)
++ acl_alloc(sizeof (struct role_transition))) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(rtmp, rusertp,
++ sizeof (struct role_transition)))
++ return -EFAULT;
++
++ rusertp = rtmp->prev;
++
++ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
++
++ if (!len || len >= GR_SPROLE_LEN)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, rtmp->rolename, len))
++ return -EFAULT;
++ tmp[len-1] = '\0';
++ rtmp->rolename = tmp;
++
++ if (!rlast) {
++ rtmp->prev = NULL;
++ rolep->transitions = rtmp;
++ } else {
++ rlast->next = rtmp;
++ rtmp->prev = rlast;
++ }
++
++ if (!rusertp)
++ rtmp->next = NULL;
++ }
++
++ return 0;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
++{
++ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
++ unsigned int len;
++ char *tmp;
++ __u32 num_objs;
++ struct acl_ip_label **i_tmp, *i_utmp2;
++ struct gr_hash_struct ghash;
++ struct subject_map *subjmap;
++ unsigned int i_num;
++ int err;
++
++ s_tmp = lookup_subject_map(userp);
++
++ /* we've already copied this subject into the kernel, just return
++ the reference to it, and don't copy it over again
++ */
++ if (s_tmp)
++ return(s_tmp);
++
++ if ((s_tmp = (struct acl_subject_label *)
++ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
++ if (subjmap == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ subjmap->user = userp;
++ subjmap->kernel = s_tmp;
++ insert_subj_map_entry(subjmap);
++
++ if (copy_from_user(s_tmp, userp,
++ sizeof (struct acl_subject_label)))
++ return ERR_PTR(-EFAULT);
++
++ len = strnlen_user(s_tmp->filename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX)
++ return ERR_PTR(-EINVAL);
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ if (copy_from_user(tmp, s_tmp->filename, len))
++ return ERR_PTR(-EFAULT);
++ tmp[len-1] = '\0';
++ s_tmp->filename = tmp;
++
++ if (!strcmp(s_tmp->filename, "/"))
++ role->root_label = s_tmp;
++
++ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
++ return ERR_PTR(-EFAULT);
++
++ /* copy user and group transition tables */
++
++ if (s_tmp->user_trans_num) {
++ uid_t *uidlist;
++
++ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
++ if (uidlist == NULL)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
++ return ERR_PTR(-EFAULT);
++
++ s_tmp->user_transitions = uidlist;
++ }
++
++ if (s_tmp->group_trans_num) {
++ gid_t *gidlist;
++
++ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
++ if (gidlist == NULL)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
++ return ERR_PTR(-EFAULT);
++
++ s_tmp->group_transitions = gidlist;
++ }
++
++ /* set up object hash table */
++ num_objs = count_user_objs(ghash.first);
++
++ s_tmp->obj_hash_size = num_objs;
++ s_tmp->obj_hash =
++ (struct acl_object_label **)
++ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
++
++ if (!s_tmp->obj_hash)
++ return ERR_PTR(-ENOMEM);
++
++ memset(s_tmp->obj_hash, 0,
++ s_tmp->obj_hash_size *
++ sizeof (struct acl_object_label *));
++
++ /* add in objects */
++ err = copy_user_objs(ghash.first, s_tmp, role);
++
++ if (err)
++ return ERR_PTR(err);
++
++ /* set pointer for parent subject */
++ if (s_tmp->parent_subject) {
++ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
++
++ if (IS_ERR(s_tmp2))
++ return s_tmp2;
++
++ s_tmp->parent_subject = s_tmp2;
++ }
++
++ /* add in ip acls */
++
++ if (!s_tmp->ip_num) {
++ s_tmp->ips = NULL;
++ goto insert;
++ }
++
++ i_tmp =
++ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
++ sizeof (struct acl_ip_label *));
++
++ if (!i_tmp)
++ return ERR_PTR(-ENOMEM);
++
++ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
++ *(i_tmp + i_num) =
++ (struct acl_ip_label *)
++ acl_alloc(sizeof (struct acl_ip_label));
++ if (!*(i_tmp + i_num))
++ return ERR_PTR(-ENOMEM);
++
++ if (copy_from_user
++ (&i_utmp2, s_tmp->ips + i_num,
++ sizeof (struct acl_ip_label *)))
++ return ERR_PTR(-EFAULT);
++
++ if (copy_from_user
++ (*(i_tmp + i_num), i_utmp2,
++ sizeof (struct acl_ip_label)))
++ return ERR_PTR(-EFAULT);
++
++ if ((*(i_tmp + i_num))->iface == NULL)
++ continue;
++
++ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
++ if (!len || len >= IFNAMSIZ)
++ return ERR_PTR(-EINVAL);
++ tmp = acl_alloc(len);
++ if (tmp == NULL)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
++ return ERR_PTR(-EFAULT);
++ (*(i_tmp + i_num))->iface = tmp;
++ }
++
++ s_tmp->ips = i_tmp;
++
++insert:
++ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
++ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
++ return ERR_PTR(-ENOMEM);
++
++ return s_tmp;
++}
++
++static int
++copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
++{
++ struct acl_subject_label s_pre;
++ struct acl_subject_label * ret;
++ int err;
++
++ while (userp) {
++ if (copy_from_user(&s_pre, userp,
++ sizeof (struct acl_subject_label)))
++ return -EFAULT;
++
++ /* do not add nested subjects here, add
++ while parsing objects
++ */
++
++ if (s_pre.mode & GR_NESTED) {
++ userp = s_pre.prev;
++ continue;
++ }
++
++ ret = do_copy_user_subj(userp, role);
++
++ err = PTR_ERR(ret);
++ if (IS_ERR(ret))
++ return err;
++
++ insert_acl_subj_label(ret, role);
++
++ userp = s_pre.prev;
++ }
++
++ return 0;
++}
++
++static int
++copy_user_acl(struct gr_arg *arg)
++{
++ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
++ struct sprole_pw *sptmp;
++ struct gr_hash_struct *ghash;
++ uid_t *domainlist;
++ unsigned int r_num;
++ unsigned int len;
++ char *tmp;
++ int err = 0;
++ __u16 i;
++ __u32 num_subjs;
++
++ /* we need a default and kernel role */
++ if (arg->role_db.num_roles < 2)
++ return -EINVAL;
++
++ /* copy special role authentication info from userspace */
++
++ num_sprole_pws = arg->num_sprole_pws;
++ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
++
++ if (!acl_special_roles) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ for (i = 0; i < num_sprole_pws; i++) {
++ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
++ if (!sptmp) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(sptmp, arg->sprole_pws + i,
++ sizeof (struct sprole_pw))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ len =
++ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
++
++ if (!len || len >= GR_SPROLE_LEN) {
++ err = -EINVAL;
++ goto cleanup;
++ }
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ if (copy_from_user(tmp, sptmp->rolename, len)) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++ tmp[len-1] = '\0';
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "Copying special role %s\n", tmp);
++#endif
++ sptmp->rolename = tmp;
++ acl_special_roles[i] = sptmp;
++ }
++
++ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
++
++ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
++ r_tmp = acl_alloc(sizeof (struct acl_role_label));
++
++ if (!r_tmp) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ if (copy_from_user(&r_utmp2, r_utmp + r_num,
++ sizeof (struct acl_role_label *))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ if (copy_from_user(r_tmp, r_utmp2,
++ sizeof (struct acl_role_label))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
++
++ if (!len || len >= PATH_MAX) {
++ err = -EINVAL;
++ goto cleanup;
++ }
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(tmp, r_tmp->rolename, len)) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++ tmp[len-1] = '\0';
++ r_tmp->rolename = tmp;
++
++ if (!strcmp(r_tmp->rolename, "default")
++ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
++ default_role = r_tmp;
++ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
++ kernel_role = r_tmp;
++ }
++
++ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ r_tmp->hash = ghash;
++
++ num_subjs = count_user_subjs(r_tmp->hash->first);
++
++ r_tmp->subj_hash_size = num_subjs;
++ r_tmp->subj_hash =
++ (struct acl_subject_label **)
++ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
++
++ if (!r_tmp->subj_hash) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ err = copy_user_allowedips(r_tmp);
++ if (err)
++ goto cleanup;
++
++ /* copy domain info */
++ if (r_tmp->domain_children != NULL) {
++ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
++ if (domainlist == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++ r_tmp->domain_children = domainlist;
++ }
++
++ err = copy_user_transitions(r_tmp);
++ if (err)
++ goto cleanup;
++
++ memset(r_tmp->subj_hash, 0,
++ r_tmp->subj_hash_size *
++ sizeof (struct acl_subject_label *));
++
++ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
++
++ if (err)
++ goto cleanup;
++
++ /* set nested subject list to null */
++ r_tmp->hash->first = NULL;
++
++ insert_acl_role_label(r_tmp);
++ }
++
++ goto return_err;
++ cleanup:
++ free_variables();
++ return_err:
++ return err;
++
++}
++
++static int
++gracl_init(struct gr_arg *args)
++{
++ int error = 0;
++
++ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
++ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
++
++ if (init_variables(args)) {
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
++ error = -ENOMEM;
++ free_variables();
++ goto out;
++ }
++
++ error = copy_user_acl(args);
++ free_init_variables();
++ if (error) {
++ free_variables();
++ goto out;
++ }
++
++ if ((error = gr_set_acls(0))) {
++ free_variables();
++ goto out;
++ }
++
++ pax_open_kernel();
++ gr_status |= GR_READY;
++ pax_close_kernel();
++
++ out:
++ return error;
++}
++
++/* derived from glibc fnmatch() 0: match, 1: no match*/
++
++static int
++glob_match(const char *p, const char *n)
++{
++ char c;
++
++ while ((c = *p++) != '\0') {
++ switch (c) {
++ case '?':
++ if (*n == '\0')
++ return 1;
++ else if (*n == '/')
++ return 1;
++ break;
++ case '\\':
++ if (*n != c)
++ return 1;
++ break;
++ case '*':
++ for (c = *p++; c == '?' || c == '*'; c = *p++) {
++ if (*n == '/')
++ return 1;
++ else if (c == '?') {
++ if (*n == '\0')
++ return 1;
++ else
++ ++n;
++ }
++ }
++ if (c == '\0') {
++ return 0;
++ } else {
++ const char *endp;
++
++ if ((endp = strchr(n, '/')) == NULL)
++ endp = n + strlen(n);
++
++ if (c == '[') {
++ for (--p; n < endp; ++n)
++ if (!glob_match(p, n))
++ return 0;
++ } else if (c == '/') {
++ while (*n != '\0' && *n != '/')
++ ++n;
++ if (*n == '/' && !glob_match(p, n + 1))
++ return 0;
++ } else {
++ for (--p; n < endp; ++n)
++ if (*n == c && !glob_match(p, n))
++ return 0;
++ }
++
++ return 1;
++ }
++ case '[':
++ {
++ int not;
++ char cold;
++
++ if (*n == '\0' || *n == '/')
++ return 1;
++
++ not = (*p == '!' || *p == '^');
++ if (not)
++ ++p;
++
++ c = *p++;
++ for (;;) {
++ unsigned char fn = (unsigned char)*n;
++
++ if (c == '\0')
++ return 1;
++ else {
++ if (c == fn)
++ goto matched;
++ cold = c;
++ c = *p++;
++
++ if (c == '-' && *p != ']') {
++ unsigned char cend = *p++;
++
++ if (cend == '\0')
++ return 1;
++
++ if (cold <= fn && fn <= cend)
++ goto matched;
++
++ c = *p++;
++ }
++ }
++
++ if (c == ']')
++ break;
++ }
++ if (!not)
++ return 1;
++ break;
++ matched:
++ while (c != ']') {
++ if (c == '\0')
++ return 1;
++
++ c = *p++;
++ }
++ if (not)
++ return 1;
++ }
++ break;
++ default:
++ if (c != *n)
++ return 1;
++ }
++
++ ++n;
++ }
++
++ if (*n == '\0')
++ return 0;
++
++ if (*n == '/')
++ return 0;
++
++ return 1;
++}
++
++static struct acl_object_label *
++chk_glob_label(struct acl_object_label *globbed,
++ struct dentry *dentry, struct vfsmount *mnt, char **path)
++{
++ struct acl_object_label *tmp;
++
++ if (*path == NULL)
++ *path = gr_to_filename_nolock(dentry, mnt);
++
++ tmp = globbed;
++
++ while (tmp) {
++ if (!glob_match(tmp->filename, *path))
++ return tmp;
++ tmp = tmp->next;
++ }
++
++ return NULL;
++}
++
++static struct acl_object_label *
++__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
++ const ino_t curr_ino, const dev_t curr_dev,
++ const struct acl_subject_label *subj, char **path, const int checkglob)
++{
++ struct acl_subject_label *tmpsubj;
++ struct acl_object_label *retval;
++ struct acl_object_label *retval2;
++
++ tmpsubj = (struct acl_subject_label *) subj;
++ read_lock(&gr_inode_lock);
++ do {
++ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
++ if (retval) {
++ if (checkglob && retval->globbed) {
++ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
++ (struct vfsmount *)orig_mnt, path);
++ if (retval2)
++ retval = retval2;
++ }
++ break;
++ }
++ } while ((tmpsubj = tmpsubj->parent_subject));
++ read_unlock(&gr_inode_lock);
++
++ return retval;
++}
++
++static __inline__ struct acl_object_label *
++full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
++ const struct dentry *curr_dentry,
++ const struct acl_subject_label *subj, char **path, const int checkglob)
++{
++ return __full_lookup(orig_dentry, orig_mnt,
++ curr_dentry->d_inode->i_ino,
++ curr_dentry->d_inode->i_sb->s_dev, subj, path, checkglob);
++}
++
++static struct acl_object_label *
++__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj, char *path, const int checkglob)
++{
++ struct dentry *dentry = (struct dentry *) l_dentry;
++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++ struct acl_object_label *retval;
++
++ spin_lock(&dcache_lock);
++
++ if (unlikely(mnt == shm_mnt || mnt == pipe_mnt || mnt == sock_mnt ||
++ /* ignore Eric Biederman */
++ IS_PRIVATE(l_dentry->d_inode))) {
++ retval = fakefs_obj;
++ goto out;
++ }
++
++ for (;;) {
++ if (dentry == real_root && mnt == real_root_mnt)
++ break;
++
++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++ if (mnt->mnt_parent == mnt)
++ break;
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++ if (retval != NULL)
++ goto out;
++
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++ if (retval != NULL)
++ goto out;
++
++ dentry = dentry->d_parent;
++ }
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++
++ if (retval == NULL)
++ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
++out:
++ spin_unlock(&dcache_lock);
++ return retval;
++}
++
++static __inline__ struct acl_object_label *
++chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj)
++{
++ char *path = NULL;
++ return __chk_obj_label(l_dentry, l_mnt, subj, path, 1);
++}
++
++static __inline__ struct acl_object_label *
++chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj)
++{
++ char *path = NULL;
++ return __chk_obj_label(l_dentry, l_mnt, subj, path, 0);
++}
++
++static __inline__ struct acl_object_label *
++chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj, char *path)
++{
++ return __chk_obj_label(l_dentry, l_mnt, subj, path, 1);
++}
++
++static struct acl_subject_label *
++chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_role_label *role)
++{
++ struct dentry *dentry = (struct dentry *) l_dentry;
++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++ struct acl_subject_label *retval;
++
++ spin_lock(&dcache_lock);
++
++ for (;;) {
++ if (dentry == real_root && mnt == real_root_mnt)
++ break;
++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++ if (mnt->mnt_parent == mnt)
++ break;
++
++ read_lock(&gr_inode_lock);
++ retval =
++ lookup_acl_subj_label(dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, role);
++ read_unlock(&gr_inode_lock);
++ if (retval != NULL)
++ goto out;
++
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++
++ read_lock(&gr_inode_lock);
++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, role);
++ read_unlock(&gr_inode_lock);
++ if (retval != NULL)
++ goto out;
++
++ dentry = dentry->d_parent;
++ }
++
++ read_lock(&gr_inode_lock);
++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, role);
++ read_unlock(&gr_inode_lock);
++
++ if (unlikely(retval == NULL)) {
++ read_lock(&gr_inode_lock);
++ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
++ real_root->d_inode->i_sb->s_dev, role);
++ read_unlock(&gr_inode_lock);
++ }
++out:
++ spin_unlock(&dcache_lock);
++
++ return retval;
++}
++
++static void
++gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
++{
++ struct task_struct *task = current;
++ const struct cred *cred = current_cred();
++
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, NIPQUAD(task->signal->curr_ip));
++
++ return;
++}
++
++static void
++gr_log_learn_sysctl(const char *path, const __u32 mode)
++{
++ struct task_struct *task = current;
++ const struct cred *cred = current_cred();
++
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++ 1UL, 1UL, path, (unsigned long) mode, NIPQUAD(task->signal->curr_ip));
++
++ return;
++}
++
++static void
++gr_log_learn_id_change(const char type, const unsigned int real,
++ const unsigned int effective, const unsigned int fs)
++{
++ struct task_struct *task = current;
++ const struct cred *cred = current_cred();
++
++ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++ type, real, effective, fs, NIPQUAD(task->signal->curr_ip));
++
++ return;
++}
++
++__u32
++gr_check_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
++{
++ struct acl_object_label *obj;
++ __u32 oldmode, newmode;
++ __u32 needmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (GR_CREATE | GR_LINK);
++
++ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
++ oldmode = obj->mode;
++
++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++ oldmode |= (GR_CREATE | GR_LINK);
++
++ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
++ needmode |= GR_SETID | GR_AUDIT_SETID;
++
++ newmode =
++ gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ oldmode | needmode);
++
++ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
++ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
++ GR_INHERIT | GR_AUDIT_INHERIT);
++
++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
++ goto bad;
++
++ if ((oldmode & needmode) != needmode)
++ goto bad;
++
++ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
++ if ((newmode & needmode) != needmode)
++ goto bad;
++
++ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
++ return newmode;
++bad:
++ needmode = oldmode;
++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
++ needmode |= GR_SETID;
++
++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
++ gr_log_learn(old_dentry, old_mnt, needmode);
++ return (GR_CREATE | GR_LINK);
++ } else if (newmode & GR_SUPPRESS)
++ return GR_SUPPRESS;
++ else
++ return 0;
++}
++
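++/* gr_search_file: central object-permission check.  The requested mode is
++   masked against the object label found for the dentry under the current
++   subject; in learn mode the requested access (minus audit/suppress bits)
++   is logged and granted instead of being denied. */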
++__u32
++gr_search_file(const struct dentry * dentry, const __u32 mode,
++ const struct vfsmount * mnt)
++{
++ __u32 retval = mode;
++ struct acl_subject_label *curracl;
++ struct acl_object_label *currobj;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (mode & ~GR_AUDITS);
++
++ curracl = current->acl;
++
++ currobj = chk_obj_label(dentry, mnt, curracl);
++ retval = currobj->mode & mode;
++
++ if (unlikely
++ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
++ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ retval = new_mode;
++
++ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
++ new_mode |= GR_INHERIT;
++
++ if (!(mode & GR_NOLEARN))
++ gr_log_learn(dentry, mnt, new_mode);
++ }
++
++ return retval;
++}
++
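++/* gr_check_create: permission check for a path that may not exist yet.
++   The full target path is looked up among create-time name entries first;
++   if no entry matches, the parent directory's object label is consulted,
++   with learn mode again logging and granting the stripped request. */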
++__u32
++gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
++ const struct vfsmount * mnt, const __u32 mode)
++{
++ struct name_entry *match;
++ struct acl_object_label *matchpo;
++ struct acl_subject_label *curracl;
++ char *path;
++ __u32 retval;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (mode & ~GR_AUDITS);
++
++ preempt_disable();
++ path = gr_to_filename_rbac(new_dentry, mnt);
++ match = lookup_name_entry_create(path);
++
++ if (!match)
++ goto check_parent;
++
++ curracl = current->acl;
++
++ read_lock(&gr_inode_lock);
++ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
++ read_unlock(&gr_inode_lock);
++
++ if (matchpo) {
++ if ((matchpo->mode & mode) !=
++ (mode & ~(GR_AUDITS | GR_SUPPRESS))
++ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ gr_log_learn(new_dentry, mnt, new_mode);
++
++ preempt_enable();
++ return new_mode;
++ }
++ preempt_enable();
++ return (matchpo->mode & mode);
++ }
++
++ check_parent:
++ curracl = current->acl;
++
++ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
++ retval = matchpo->mode & mode;
++
++ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
++ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ gr_log_learn(new_dentry, mnt, new_mode);
++ preempt_enable();
++ return new_mode;
++ }
++
++ preempt_enable();
++ return retval;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *task)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
++ return 1;
++
++ return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++ if (unlikely(!(gr_status & GR_READY) || !task))
++ return 0;
++
++ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
++ task->acl != current->acl)
++ return 1;
++
++ return 0;
++}
++
++void
++gr_copy_label(struct task_struct *tsk)
++{
++ tsk->signal->used_accept = 0;
++ tsk->acl_sp_role = 0;
++ tsk->acl_role_id = current->acl_role_id;
++ tsk->acl = current->acl;
++ tsk->role = current->role;
++ tsk->signal->curr_ip = current->signal->curr_ip;
++ if (current->exec_file)
++ get_file(current->exec_file);
++ tsk->exec_file = current->exec_file;
++ tsk->is_writable = current->is_writable;
++ if (unlikely(current->signal->used_accept))
++ current->signal->curr_ip = 0;
++
++ return;
++}
++
++static void
++gr_set_proc_res(struct task_struct *task)
++{
++ struct acl_subject_label *proc;
++ unsigned short i;
++
++ proc = task->acl;
++
++ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
++ return;
++
++ for (i = 0; i < RLIM_NLIMITS; i++) {
++ if (!(proc->resmask & (1 << i)))
++ continue;
++
++ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
++ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
++ }
++
++ return;
++}
++
++int
++gr_check_user_change(int real, int effective, int fs)
++{
++ unsigned int i;
++ __u16 num;
++ uid_t *uidlist;
++ int curuid;
++ int realok = 0;
++ int effectiveok = 0;
++ int fsok = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++ gr_log_learn_id_change('u', real, effective, fs);
++
++ num = current->acl->user_trans_num;
++ uidlist = current->acl->user_transitions;
++
++ if (uidlist == NULL)
++ return 0;
++
++ if (real == -1)
++ realok = 1;
++ if (effective == -1)
++ effectiveok = 1;
++ if (fs == -1)
++ fsok = 1;
++
++ if (current->acl->user_trans_type & GR_ID_ALLOW) {
++ for (i = 0; i < num; i++) {
++ curuid = (int)uidlist[i];
++ if (real == curuid)
++ realok = 1;
++ if (effective == curuid)
++ effectiveok = 1;
++ if (fs == curuid)
++ fsok = 1;
++ }
++ } else if (current->acl->user_trans_type & GR_ID_DENY) {
++ for (i = 0; i < num; i++) {
++ curuid = (int)uidlist[i];
++ if (real == curuid)
++ break;
++ if (effective == curuid)
++ break;
++ if (fs == curuid)
++ break;
++ }
++ /* not in deny list */
++ if (i == num) {
++ realok = 1;
++ effectiveok = 1;
++ fsok = 1;
++ }
++ }
++
++ if (realok && effectiveok && fsok)
++ return 0;
++ else {
++ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
++ return 1;
++ }
++}
++
++int
++gr_check_group_change(int real, int effective, int fs)
++{
++ unsigned int i;
++ __u16 num;
++ gid_t *gidlist;
++ int curgid;
++ int realok = 0;
++ int effectiveok = 0;
++ int fsok = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++ gr_log_learn_id_change('g', real, effective, fs);
++
++ num = current->acl->group_trans_num;
++ gidlist = current->acl->group_transitions;
++
++ if (gidlist == NULL)
++ return 0;
++
++ if (real == -1)
++ realok = 1;
++ if (effective == -1)
++ effectiveok = 1;
++ if (fs == -1)
++ fsok = 1;
++
++ if (current->acl->group_trans_type & GR_ID_ALLOW) {
++ for (i = 0; i < num; i++) {
++ curgid = (int)gidlist[i];
++ if (real == curgid)
++ realok = 1;
++ if (effective == curgid)
++ effectiveok = 1;
++ if (fs == curgid)
++ fsok = 1;
++ }
++ } else if (current->acl->group_trans_type & GR_ID_DENY) {
++ for (i = 0; i < num; i++) {
++ curgid = (int)gidlist[i];
++ if (real == curgid)
++ break;
++ if (effective == curgid)
++ break;
++ if (fs == curgid)
++ break;
++ }
++ /* not in deny list */
++ if (i == num) {
++ realok = 1;
++ effectiveok = 1;
++ fsok = 1;
++ }
++ }
++
++ if (realok && effectiveok && fsok)
++ return 0;
++ else {
++ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
++ return 1;
++ }
++}
++
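++/* gr_set_role_label: re-evaluate a task's role and subject after a uid/gid
++   change.  Kernel threads (no exec_file) get the kernel role; otherwise the
++   role is looked up by id, the subject is resolved against the executable
++   (preserving an inherited subject where applicable), and is_writable and
++   resource limits are refreshed. */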
++void
++gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
++{
++ struct acl_role_label *role = task->role;
++ struct acl_subject_label *subj = NULL;
++ struct acl_object_label *obj;
++ struct file *filp;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ filp = task->exec_file;
++
++ /* kernel process, we'll give them the kernel role */
++ if (unlikely(!filp)) {
++ task->role = kernel_role;
++ task->acl = kernel_role->root_label;
++ return;
++ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
++ role = lookup_acl_role_label(task, uid, gid);
++
++ /* perform subject lookup in possibly new role
++ we can use this result below in the case where role == task->role
++ */
++ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
++
++	/* if we changed uid/gid but ended up with the same role
++	   and are using inheritance, don't lose the inherited subject:
++	   if the current subject is other than what a normal lookup
++	   would result in, we arrived here via inheritance, so don't
++	   lose that subject
++	 */
++ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
++ (subj == task->acl)))
++ task->acl = subj;
++
++ task->role = role;
++
++ task->is_writable = 0;
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++
++ gr_set_proc_res(task);
++
++ return;
++}
++
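++/* gr_set_proc_label: subject transition at exec time.  The exec is refused
++   (-EACCES) when a ptraced task, or one flagged as an unsafe share, would
++   change subject without GR_POVERRIDE, the admin role, GR_PTRACERD on the
++   new image or learn mode; otherwise the new (or nested/inherited) subject
++   is installed and is_writable and resource limits are refreshed. */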
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
++ const int unsafe_share)
++{
++ struct task_struct *task = current;
++ struct acl_subject_label *newacl;
++ struct acl_object_label *obj;
++ __u32 retmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ newacl = chk_subj_label(dentry, mnt, task->role);
++
++ task_lock(task);
++ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
++ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
++ !(task->role->roletype & GR_ROLE_GOD) &&
++ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
++ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
++ task_unlock(task);
++ if (unsafe_share)
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
++ else
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
++ return -EACCES;
++ }
++ task_unlock(task);
++
++ obj = chk_obj_label(dentry, mnt, task->acl);
++ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
++
++ if (!(task->acl->mode & GR_INHERITLEARN) &&
++ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
++ if (obj->nested)
++ task->acl = obj->nested;
++ else
++ task->acl = newacl;
++ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
++ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
++
++ task->is_writable = 0;
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(dentry, mnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(dentry, mnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++ gr_set_proc_res(task);
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++ return 0;
++}
++
++/* always called with valid inodev ptr */
++static void
++do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
++{
++ struct acl_object_label *matchpo;
++ struct acl_subject_label *matchps;
++ struct acl_subject_label *subj;
++ struct acl_role_label *role;
++ unsigned int x;
++
++ FOR_EACH_ROLE_START(role)
++ FOR_EACH_SUBJECT_START(role, subj, x)
++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
++ matchpo->mode |= GR_DELETED;
++ FOR_EACH_SUBJECT_END(subj,x)
++ FOR_EACH_NESTED_SUBJECT_START(role, subj)
++ if (subj->inode == ino && subj->device == dev)
++ subj->mode |= GR_DELETED;
++ FOR_EACH_NESTED_SUBJECT_END(subj)
++ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
++ matchps->mode |= GR_DELETED;
++ FOR_EACH_ROLE_END(role)
++
++ inodev->nentry->deleted = 1;
++
++ return;
++}
++
++void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++ struct inodev_entry *inodev;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ write_lock(&gr_inode_lock);
++ inodev = lookup_inodev_entry(ino, dev);
++ if (inodev != NULL)
++ do_handle_delete(inodev, ino, dev);
++ write_unlock(&gr_inode_lock);
++
++ return;
++}
++
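++/* The update_* helpers below back gr_handle_create()/gr_handle_rename():
++   when an inode/device pair marked GR_DELETED (or a deleted inodev entry)
++   reappears under a new inode, the matching object, subject or inodev
++   entry is unhashed, rewritten with the new inode/device and reinserted. */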
++static void
++update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice,
++ struct acl_subject_label *subj)
++{
++ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
++ struct acl_object_label *match;
++
++ match = subj->obj_hash[index];
++
++ while (match && (match->inode != oldinode ||
++ match->device != olddevice ||
++ !(match->mode & GR_DELETED)))
++ match = match->next;
++
++ if (match && (match->inode == oldinode)
++ && (match->device == olddevice)
++ && (match->mode & GR_DELETED)) {
++ if (match->prev == NULL) {
++ subj->obj_hash[index] = match->next;
++ if (match->next != NULL)
++ match->next->prev = NULL;
++ } else {
++ match->prev->next = match->next;
++ if (match->next != NULL)
++ match->next->prev = match->prev;
++ }
++ match->prev = NULL;
++ match->next = NULL;
++ match->inode = newinode;
++ match->device = newdevice;
++ match->mode &= ~GR_DELETED;
++
++ insert_acl_obj_label(match, subj);
++ }
++
++ return;
++}
++
++static void
++update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice,
++ struct acl_role_label *role)
++{
++ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
++ struct acl_subject_label *match;
++
++ match = role->subj_hash[index];
++
++ while (match && (match->inode != oldinode ||
++ match->device != olddevice ||
++ !(match->mode & GR_DELETED)))
++ match = match->next;
++
++ if (match && (match->inode == oldinode)
++ && (match->device == olddevice)
++ && (match->mode & GR_DELETED)) {
++ if (match->prev == NULL) {
++ role->subj_hash[index] = match->next;
++ if (match->next != NULL)
++ match->next->prev = NULL;
++ } else {
++ match->prev->next = match->next;
++ if (match->next != NULL)
++ match->next->prev = match->prev;
++ }
++ match->prev = NULL;
++ match->next = NULL;
++ match->inode = newinode;
++ match->device = newdevice;
++ match->mode &= ~GR_DELETED;
++
++ insert_acl_subj_label(match, role);
++ }
++
++ return;
++}
++
++static void
++update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice)
++{
++ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
++ struct inodev_entry *match;
++
++ match = inodev_set.i_hash[index];
++
++ while (match && (match->nentry->inode != oldinode ||
++ match->nentry->device != olddevice || !match->nentry->deleted))
++ match = match->next;
++
++ if (match && (match->nentry->inode == oldinode)
++ && (match->nentry->device == olddevice) &&
++ match->nentry->deleted) {
++ if (match->prev == NULL) {
++ inodev_set.i_hash[index] = match->next;
++ if (match->next != NULL)
++ match->next->prev = NULL;
++ } else {
++ match->prev->next = match->next;
++ if (match->next != NULL)
++ match->next->prev = match->prev;
++ }
++ match->prev = NULL;
++ match->next = NULL;
++ match->nentry->inode = newinode;
++ match->nentry->device = newdevice;
++ match->nentry->deleted = 0;
++
++ insert_inodev_entry(match);
++ }
++
++ return;
++}
++
++static void
++do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
++ const struct vfsmount *mnt)
++{
++ struct acl_subject_label *subj;
++ struct acl_role_label *role;
++ unsigned int x;
++
++ FOR_EACH_ROLE_START(role)
++ update_acl_subj_label(matchn->inode, matchn->device,
++ dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, role);
++
++ FOR_EACH_NESTED_SUBJECT_START(role, subj)
++ if ((subj->inode == dentry->d_inode->i_ino) &&
++ (subj->device == dentry->d_inode->i_sb->s_dev)) {
++ subj->inode = dentry->d_inode->i_ino;
++ subj->device = dentry->d_inode->i_sb->s_dev;
++ }
++ FOR_EACH_NESTED_SUBJECT_END(subj)
++ FOR_EACH_SUBJECT_START(role, subj, x)
++ update_acl_obj_label(matchn->inode, matchn->device,
++ dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, subj);
++ FOR_EACH_SUBJECT_END(subj,x)
++ FOR_EACH_ROLE_END(role)
++
++ update_inodev_entry(matchn->inode, matchn->device,
++ dentry->d_inode->i_ino, dentry->d_inode->i_sb->s_dev);
++
++ return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ struct name_entry *matchn;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ preempt_disable();
++ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
++
++ if (unlikely((unsigned long)matchn)) {
++ write_lock(&gr_inode_lock);
++ do_handle_create(matchn, dentry, mnt);
++ write_unlock(&gr_inode_lock);
++ }
++ preempt_enable();
++
++ return;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace)
++{
++ struct name_entry *matchn;
++ struct inodev_entry *inodev;
++
++	/* vfs_rename swaps the name and parent link for old_dentry and
++	   new_dentry.
++	   At this point, old_dentry has the new name, parent link, and inode
++	   for the renamed file.
++	   If a file is being replaced by the rename, new_dentry has the inode
++	   and name of the replaced file.
++	*/
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ preempt_disable();
++ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
++
++ /* we wouldn't have to check d_inode if it weren't for
++ NFS silly-renaming
++ */
++
++ write_lock(&gr_inode_lock);
++ if (unlikely(replace && new_dentry->d_inode)) {
++ inodev = lookup_inodev_entry(new_dentry->d_inode->i_ino,
++ new_dentry->d_inode->i_sb->s_dev);
++ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
++ do_handle_delete(inodev, new_dentry->d_inode->i_ino,
++ new_dentry->d_inode->i_sb->s_dev);
++ }
++
++ inodev = lookup_inodev_entry(old_dentry->d_inode->i_ino,
++ old_dentry->d_inode->i_sb->s_dev);
++ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
++ do_handle_delete(inodev, old_dentry->d_inode->i_ino,
++ old_dentry->d_inode->i_sb->s_dev);
++
++ if (unlikely((unsigned long)matchn))
++ do_handle_create(matchn, old_dentry, mnt);
++
++ write_unlock(&gr_inode_lock);
++ preempt_enable();
++
++ return;
++}
++
++static int
++lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
++ unsigned char **sum)
++{
++ struct acl_role_label *r;
++ struct role_allowed_ip *ipp;
++ struct role_transition *trans;
++ unsigned int i;
++ int found = 0;
++
++ /* check transition table */
++
++ for (trans = current->role->transitions; trans; trans = trans->next) {
++ if (!strcmp(rolename, trans->rolename)) {
++ found = 1;
++ break;
++ }
++ }
++
++ if (!found)
++ return 0;
++
++ /* handle special roles that do not require authentication
++ and check ip */
++
++ FOR_EACH_ROLE_START(r)
++ if (!strcmp(rolename, r->rolename) &&
++ (r->roletype & GR_ROLE_SPECIAL)) {
++ found = 0;
++ if (r->allowed_ips != NULL) {
++ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
++ if ((ntohl(current->signal->curr_ip) & ipp->netmask) ==
++ (ntohl(ipp->addr) & ipp->netmask))
++ found = 1;
++ }
++ } else
++ found = 2;
++ if (!found)
++ return 0;
++
++ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
++ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
++ *salt = NULL;
++ *sum = NULL;
++ return 1;
++ }
++ }
++ FOR_EACH_ROLE_END(r)
++
++ for (i = 0; i < num_sprole_pws; i++) {
++ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
++ *salt = acl_special_roles[i]->salt;
++ *sum = acl_special_roles[i]->sum;
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++static void
++assign_special_role(char *rolename)
++{
++ struct acl_object_label *obj;
++ struct acl_role_label *r;
++ struct acl_role_label *assigned = NULL;
++ struct task_struct *tsk;
++ struct file *filp;
++
++ FOR_EACH_ROLE_START(r)
++ if (!strcmp(rolename, r->rolename) &&
++ (r->roletype & GR_ROLE_SPECIAL)) {
++ assigned = r;
++ break;
++ }
++ FOR_EACH_ROLE_END(r)
++
++ if (!assigned)
++ return;
++
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++
++ tsk = current->parent;
++ if (tsk == NULL)
++ goto out_unlock;
++
++ filp = tsk->exec_file;
++ if (filp == NULL)
++ goto out_unlock;
++
++ tsk->is_writable = 0;
++
++ tsk->acl_sp_role = 1;
++ tsk->acl_role_id = ++acl_sp_role_value;
++ tsk->role = assigned;
++ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ tsk->is_writable = 1;
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ tsk->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
++#endif
++
++out_unlock:
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return;
++}
++
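++/* gr_check_secure_terminal: look for other tasks holding an open file on
++   the same character device as the caller's controlling tty.  A holder
++   outside the caller's ancestry is logged as tty sniffing and handed to
++   gr_handle_alertkill(); the function then returns 0, and 1 otherwise. */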
++int gr_check_secure_terminal(struct task_struct *task)
++{
++ struct task_struct *p, *p2, *p3;
++ struct files_struct *files;
++ struct fdtable *fdt;
++ struct file *our_file = NULL, *file;
++ int i;
++
++ if (task->signal->tty == NULL)
++ return 1;
++
++ files = get_files_struct(task);
++ if (files != NULL) {
++ rcu_read_lock();
++ fdt = files_fdtable(files);
++ for (i=0; i < fdt->max_fds; i++) {
++ file = fcheck_files(files, i);
++ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
++ get_file(file);
++ our_file = file;
++ }
++ }
++ rcu_read_unlock();
++ put_files_struct(files);
++ }
++
++ if (our_file == NULL)
++ return 1;
++
++ read_lock(&tasklist_lock);
++ do_each_thread(p2, p) {
++ files = get_files_struct(p);
++ if (files == NULL ||
++ (p->signal && p->signal->tty == task->signal->tty)) {
++ if (files != NULL)
++ put_files_struct(files);
++ continue;
++ }
++ rcu_read_lock();
++ fdt = files_fdtable(files);
++ for (i=0; i < fdt->max_fds; i++) {
++ file = fcheck_files(files, i);
++ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
++ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
++ p3 = task;
++ while (p3->pid > 0) {
++ if (p3 == p)
++ break;
++ p3 = p3->parent;
++ }
++ if (p3 == p)
++ break;
++ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
++ gr_handle_alertkill(p);
++ rcu_read_unlock();
++ put_files_struct(files);
++ read_unlock(&tasklist_lock);
++ fput(our_file);
++ return 0;
++ }
++ }
++ rcu_read_unlock();
++ put_files_struct(files);
++ } while_each_thread(p2, p);
++ read_unlock(&tasklist_lock);
++
++ fput(our_file);
++ return 1;
++}
++
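++/* write_grsec_handler: write handler for the grsecurity control interface.
++   It copies in a struct gr_arg_wrapper, enforces the authentication attempt
++   limit, and dispatches on the requested mode: GR_STATUS, GR_ENABLE,
++   GR_RELOAD, GR_SHUTDOWN, GR_SEGVMOD, GR_SPROLE/GR_SPROLEPAM and
++   GR_UNSPROLE. */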
++ssize_t
++write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
++{
++ struct gr_arg_wrapper uwrap;
++ unsigned char *sprole_salt;
++ unsigned char *sprole_sum;
++ int error = sizeof (struct gr_arg_wrapper);
++ int error2 = 0;
++
++ down(&gr_dev_sem);
++
++ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
++ error = -EPERM;
++ goto out;
++ }
++
++ if (count != sizeof (struct gr_arg_wrapper)) {
++ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
++ error = -EINVAL;
++ goto out;
++ }
++
++
++ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
++ gr_auth_expires = 0;
++ gr_auth_attempts = 0;
++ }
++
++ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
++ error = -EFAULT;
++ goto out;
++ }
++
++ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
++ error = -EINVAL;
++ goto out;
++ }
++
++ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
++ error = -EFAULT;
++ goto out;
++ }
++
++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
++ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
++ time_after(gr_auth_expires, get_seconds())) {
++ error = -EBUSY;
++ goto out;
++ }
++
++	/* if a non-root user is trying to do anything other than use a
++	   special role, do not attempt authentication and do not count the
++	   attempt towards authentication locking
++	 */
++
++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
++ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
++ current_uid()) {
++ error = -EPERM;
++ goto out;
++ }
++
++ /* ensure pw and special role name are null terminated */
++
++ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
++ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
++
++	/* Okay.
++	 * We have enough of the argument structure (we have yet
++	 * to copy_from_user the tables themselves).  Copy the tables
++	 * only if we need them, i.e. for loading operations. */
++
++ switch (gr_usermode->mode) {
++ case GR_STATUS:
++ if (gr_status & GR_READY) {
++ error = 1;
++ if (!gr_check_secure_terminal(current))
++ error = 3;
++ } else
++ error = 2;
++ goto out;
++ case GR_SHUTDOWN:
++ if ((gr_status & GR_READY)
++ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++ pax_open_kernel();
++ gr_status &= ~GR_READY;
++ pax_close_kernel();
++
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
++ free_variables();
++ memset(gr_usermode, 0, sizeof (struct gr_arg));
++ memset(gr_system_salt, 0, GR_SALT_LEN);
++ memset(gr_system_sum, 0, GR_SHA_LEN);
++ } else if (gr_status & GR_READY) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
++ error = -EPERM;
++ } else {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
++ error = -EAGAIN;
++ }
++ break;
++ case GR_ENABLE:
++ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
++ else {
++ if (gr_status & GR_READY)
++ error = -EAGAIN;
++ else
++ error = error2;
++ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
++ }
++ break;
++ case GR_RELOAD:
++ if (!(gr_status & GR_READY)) {
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
++ error = -EAGAIN;
++ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++ lock_kernel();
++
++ pax_open_kernel();
++ gr_status &= ~GR_READY;
++ pax_close_kernel();
++
++ free_variables();
++ if (!(error2 = gracl_init(gr_usermode))) {
++ unlock_kernel();
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
++ } else {
++ unlock_kernel();
++ error = error2;
++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
++ }
++ } else {
++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
++ error = -EPERM;
++ }
++ break;
++ case GR_SEGVMOD:
++ if (unlikely(!(gr_status & GR_READY))) {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
++ error = -EAGAIN;
++ break;
++ }
++
++ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
++ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
++ struct acl_subject_label *segvacl;
++ segvacl =
++ lookup_acl_subj_label(gr_usermode->segv_inode,
++ gr_usermode->segv_device,
++ current->role);
++ if (segvacl) {
++ segvacl->crashes = 0;
++ segvacl->expires = 0;
++ }
++ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
++ gr_remove_uid(gr_usermode->segv_uid);
++ }
++ } else {
++ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
++ error = -EPERM;
++ }
++ break;
++ case GR_SPROLE:
++ case GR_SPROLEPAM:
++ if (unlikely(!(gr_status & GR_READY))) {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
++ error = -EAGAIN;
++ break;
++ }
++
++ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
++ current->role->expires = 0;
++ current->role->auth_attempts = 0;
++ }
++
++ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
++ time_after(current->role->expires, get_seconds())) {
++ error = -EBUSY;
++ goto out;
++ }
++
++ if (lookup_special_role_auth
++ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
++ && ((!sprole_salt && !sprole_sum)
++ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
++ char *p = "";
++ assign_special_role(gr_usermode->sp_role);
++ read_lock(&tasklist_lock);
++ if (current->parent)
++ p = current->parent->role->rolename;
++ read_unlock(&tasklist_lock);
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
++ p, acl_sp_role_value);
++ } else {
++ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
++ error = -EPERM;
++ if(!(current->role->auth_attempts++))
++ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++
++ goto out;
++ }
++ break;
++ case GR_UNSPROLE:
++ if (unlikely(!(gr_status & GR_READY))) {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
++ error = -EAGAIN;
++ break;
++ }
++
++ if (current->role->roletype & GR_ROLE_SPECIAL) {
++ char *p = "";
++ int i = 0;
++
++ read_lock(&tasklist_lock);
++ if (current->parent) {
++ p = current->parent->role->rolename;
++ i = current->parent->acl_role_id;
++ }
++ read_unlock(&tasklist_lock);
++
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
++ gr_set_acls(1);
++ } else {
++ gr_log_str(GR_DONT_AUDIT, GR_UNSPROLEF_ACL_MSG, current->role->rolename);
++ error = -EPERM;
++ goto out;
++ }
++ break;
++ default:
++ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
++ error = -EINVAL;
++ break;
++ }
++
++ if (error != -EPERM)
++ goto out;
++
++ if(!(gr_auth_attempts++))
++ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++
++ out:
++ up(&gr_dev_sem);
++ return error;
++}
++
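++/* gr_set_acls: (re)apply role and subject labels to every task.  With a
++   nonzero type (as used from the GR_UNSPROLE path) only tasks still
++   carrying the caller's role and acl_role_id are touched; a task whose
++   executable yields no subject at all triggers a GR_DEFACL_MSG log and a
++   nonzero return. */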
++int
++gr_set_acls(const int type)
++{
++ struct acl_object_label *obj;
++ struct task_struct *task, *task2;
++ struct file *filp;
++ struct acl_role_label *role = current->role;
++ __u16 acl_role_id = current->acl_role_id;
++ const struct cred *cred;
++ char *tmpname;
++ struct name_entry *nmatch;
++ struct acl_subject_label *tmpsubj;
++
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ do_each_thread(task2, task) {
++		/* check to see if we're called from the exit handler;
++		   if so, only replace ACLs that have inherited the admin
++		   ACL */
++
++ if (type && (task->role != role ||
++ task->acl_role_id != acl_role_id))
++ continue;
++
++ task->acl_role_id = 0;
++ task->acl_sp_role = 0;
++
++ if ((filp = task->exec_file)) {
++ cred = __task_cred(task);
++ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
++
++			/* the following applies the correct subject to
++			   binaries that were already running when the RBAC
++			   system was enabled, and whose files have been
++			   replaced or deleted since their execution
++			   -----
++			   when the RBAC system starts, the inode/dev
++			   from exec_file will be one the RBAC system
++			   is unaware of.  It only knows the inode/dev
++			   of the present file on disk, or the absence
++			   of it.
++			*/
++ preempt_disable();
++ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
++
++ nmatch = lookup_name_entry(tmpname);
++ preempt_enable();
++ tmpsubj = NULL;
++ if (nmatch) {
++ if (nmatch->deleted)
++ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
++ else
++ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
++ if (tmpsubj != NULL)
++ task->acl = tmpsubj;
++ }
++ if (tmpsubj == NULL)
++ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
++ task->role);
++ if (task->acl) {
++ struct acl_subject_label *curr;
++ curr = task->acl;
++
++ task->is_writable = 0;
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++ gr_set_proc_res(task);
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++ } else {
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
++ return 1;
++ }
++ } else {
++ // it's a kernel process
++ task->role = kernel_role;
++ task->acl = kernel_role->root_label;
++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
++ task->acl->mode &= ~GR_PROCFIND;
++#endif
++ }
++ } while_each_thread(task2, task);
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return 0;
++}
++
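++/* gr_learn_resource: resource-limit learning.  Requests are passed to
++   gr_log_resource() when CONFIG_GRKERNSEC_RESLOG is set (unless the subject
++   is in learn mode); in learn mode, a request above the stored soft limit
++   bumps the subject's limits by a per-resource GR_RLIM_*_BUMP slack and
++   emits a learn record. */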
++void
++gr_learn_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ struct acl_subject_label *acl;
++ const struct cred *cred;
++
++ if (unlikely((gr_status & GR_READY) &&
++ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
++ goto skip_reslog;
++
++#ifdef CONFIG_GRKERNSEC_RESLOG
++ gr_log_resource(task, res, wanted, gt);
++#endif
++ skip_reslog:
++
++ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
++ return;
++
++ acl = task->acl;
++
++ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
++ !(acl->resmask & (1 << (unsigned short) res))))
++ return;
++
++ if (wanted >= acl->res[res].rlim_cur) {
++ unsigned long res_add;
++
++ res_add = wanted;
++ switch (res) {
++ case RLIMIT_CPU:
++ res_add += GR_RLIM_CPU_BUMP;
++ break;
++ case RLIMIT_FSIZE:
++ res_add += GR_RLIM_FSIZE_BUMP;
++ break;
++ case RLIMIT_DATA:
++ res_add += GR_RLIM_DATA_BUMP;
++ break;
++ case RLIMIT_STACK:
++ res_add += GR_RLIM_STACK_BUMP;
++ break;
++ case RLIMIT_CORE:
++ res_add += GR_RLIM_CORE_BUMP;
++ break;
++ case RLIMIT_RSS:
++ res_add += GR_RLIM_RSS_BUMP;
++ break;
++ case RLIMIT_NPROC:
++ res_add += GR_RLIM_NPROC_BUMP;
++ break;
++ case RLIMIT_NOFILE:
++ res_add += GR_RLIM_NOFILE_BUMP;
++ break;
++ case RLIMIT_MEMLOCK:
++ res_add += GR_RLIM_MEMLOCK_BUMP;
++ break;
++ case RLIMIT_AS:
++ res_add += GR_RLIM_AS_BUMP;
++ break;
++ case RLIMIT_LOCKS:
++ res_add += GR_RLIM_LOCKS_BUMP;
++ break;
++ case RLIMIT_SIGPENDING:
++ res_add += GR_RLIM_SIGPENDING_BUMP;
++ break;
++ case RLIMIT_MSGQUEUE:
++ res_add += GR_RLIM_MSGQUEUE_BUMP;
++ break;
++ case RLIMIT_NICE:
++ res_add += GR_RLIM_NICE_BUMP;
++ break;
++ case RLIMIT_RTPRIO:
++ res_add += GR_RLIM_RTPRIO_BUMP;
++ break;
++ case RLIMIT_RTTIME:
++ res_add += GR_RLIM_RTTIME_BUMP;
++ break;
++ }
++
++ acl->res[res].rlim_cur = res_add;
++
++ if (wanted > acl->res[res].rlim_max)
++ acl->res[res].rlim_max = res_add;
++
++ /* only log the subject filename, since resource logging is supported for
++ single-subject learning only */
++ cred = __task_cred(task);
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
++ task->role->roletype, cred->uid, cred->gid, acl->filename,
++ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
++ "", (unsigned long) res, NIPQUAD(task->signal->curr_ip));
++ }
++
++ return;
++}
++
++#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
++void
++pax_set_initial_flags(struct linux_binprm *bprm)
++{
++ struct task_struct *task = current;
++ struct acl_subject_label *proc;
++ unsigned long flags;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ flags = pax_get_flags(task);
++
++ proc = task->acl;
++
++ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
++ flags &= ~MF_PAX_PAGEEXEC;
++ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
++ flags &= ~MF_PAX_SEGMEXEC;
++ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
++ flags &= ~MF_PAX_RANDMMAP;
++ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
++ flags &= ~MF_PAX_EMUTRAMP;
++ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
++ flags &= ~MF_PAX_MPROTECT;
++
++ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
++ flags |= MF_PAX_PAGEEXEC;
++ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
++ flags |= MF_PAX_SEGMEXEC;
++ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
++ flags |= MF_PAX_RANDMMAP;
++ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
++ flags |= MF_PAX_EMUTRAMP;
++ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
++ flags |= MF_PAX_MPROTECT;
++
++ pax_set_flags(task, flags);
++
++ return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++/* Eric Biederman likes breaking userland ABI and every inode-based security
++ system to save 35kb of memory */
++
++/* we modify the passed in filename, but adjust it back before returning */
++static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
++{
++ struct name_entry *nmatch;
++ char *p, *lastp = NULL;
++ struct acl_object_label *obj = NULL, *tmp;
++ struct acl_subject_label *tmpsubj;
++ char c = '\0';
++
++ read_lock(&gr_inode_lock);
++
++ p = name + len - 1;
++ do {
++ nmatch = lookup_name_entry(name);
++ if (lastp != NULL)
++ *lastp = c;
++
++ if (nmatch == NULL)
++ goto next_component;
++ tmpsubj = current->acl;
++ do {
++ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
++ if (obj != NULL) {
++ tmp = obj->globbed;
++ while (tmp) {
++ if (!glob_match(tmp->filename, name)) {
++ obj = tmp;
++ goto found_obj;
++ }
++ tmp = tmp->next;
++ }
++ goto found_obj;
++ }
++ } while ((tmpsubj = tmpsubj->parent_subject));
++next_component:
++ /* end case */
++ if (p == name)
++ break;
++
++ while (*p != '/')
++ p--;
++ if (p == name)
++ lastp = p + 1;
++ else {
++ lastp = p;
++ p--;
++ }
++ c = *lastp;
++ *lastp = '\0';
++ } while (1);
++found_obj:
++ read_unlock(&gr_inode_lock);
++ /* obj returned will always be non-null */
++ return obj;
++}
++
++/* returns 0 when allowing, non-zero on error
++ op of 0 is used for readdir, so we don't log the names of hidden files
++*/
++__u32
++gr_handle_sysctl(const struct ctl_table *table, const int op)
++{
++ ctl_table *tmp;
++ const char *proc_sys = "/proc/sys";
++ char *path;
++ struct acl_object_label *obj;
++ unsigned short len = 0, pos = 0, depth = 0, i;
++ __u32 err = 0;
++ __u32 mode = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ /* for now, ignore operations on non-sysctl entries if it's not a
++ readdir*/
++ if (table->child != NULL && op != 0)
++ return 0;
++
++ mode |= GR_FIND;
++	/* it's only a read if it's an entry; read on dirs is for readdir */
++ if (op & MAY_READ)
++ mode |= GR_READ;
++ if (op & MAY_WRITE)
++ mode |= GR_WRITE;
++
++ preempt_disable();
++
++ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
++
++	/* it's only a read/write if it's an actual entry, not a dir
++	   (dirs are opened for readdir)
++	 */
++
++ /* convert the requested sysctl entry into a pathname */
++
++ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
++ len += strlen(tmp->procname);
++ len++;
++ depth++;
++ }
++
++ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
++ /* deny */
++ goto out;
++ }
++
++ memset(path, 0, PAGE_SIZE);
++
++ memcpy(path, proc_sys, strlen(proc_sys));
++
++ pos += strlen(proc_sys);
++
++ for (; depth > 0; depth--) {
++ path[pos] = '/';
++ pos++;
++ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
++ if (depth == i) {
++ memcpy(path + pos, tmp->procname,
++ strlen(tmp->procname));
++ pos += strlen(tmp->procname);
++ }
++ i++;
++ }
++ }
++
++ obj = gr_lookup_by_name(path, pos);
++ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
++
++ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
++ ((err & mode) != mode))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ err = 0;
++ gr_log_learn_sysctl(path, new_mode);
++ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
++ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
++ err = -ENOENT;
++ } else if (!(err & GR_FIND)) {
++ err = -ENOENT;
++ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
++ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
++ path, (mode & GR_READ) ? " reading" : "",
++ (mode & GR_WRITE) ? " writing" : "");
++ err = -EACCES;
++ } else if ((err & mode) != mode) {
++ err = -EACCES;
++ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
++ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
++ path, (mode & GR_READ) ? " reading" : "",
++ (mode & GR_WRITE) ? " writing" : "");
++ err = 0;
++ } else
++ err = 0;
++
++ out:
++ preempt_enable();
++
++ return err;
++}
++#endif
++
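++/* gr_handle_proc_ptrace/gr_handle_ptrace: ptrace restrictions.  Access is
++   denied when the tracing task is not an ancestor of the target (under
++   harden-ptrace for non-root, or without GR_RELAXPTRACE once RBAC is
++   active), when the target's executable is marked GR_NOPTRACE, or when the
++   subjects differ without GR_POVERRIDE or the admin role; GR_PTRACERD
++   additionally limits the tracer to read-only requests. */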
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++ struct file *filp;
++ struct task_struct *tmp = task;
++ struct task_struct *curtemp = current;
++ __u32 retmode;
++
++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++#endif
++
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ filp = task->exec_file;
++
++ while (tmp->pid > 0) {
++ if (tmp == curtemp)
++ break;
++ tmp = tmp->parent;
++ }
++
++ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return 1;
++ }
++
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ if (!(gr_status & GR_READY)) {
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return 0;
++ }
++#endif
++
++ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++
++ if (retmode & GR_NOPTRACE)
++ return 1;
++
++ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
++ && (current->acl != task->acl || (current->acl != current->role->root_label
++ && current->pid != task->pid)))
++ return 1;
++
++ return 0;
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++ struct task_struct *tmp = task;
++ struct task_struct *curtemp = current;
++ __u32 retmode;
++
++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++#endif
++
++ read_lock(&tasklist_lock);
++ while (tmp->pid > 0) {
++ if (tmp == curtemp)
++ break;
++ tmp = tmp->parent;
++ }
++
++ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
++ read_unlock(&tasklist_lock);
++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++ return 1;
++ }
++ read_unlock(&tasklist_lock);
++
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ if (!(gr_status & GR_READY))
++ return 0;
++#endif
++
++ read_lock(&grsec_exec_file_lock);
++ if (unlikely(!task->exec_file)) {
++ read_unlock(&grsec_exec_file_lock);
++ return 0;
++ }
++
++ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
++ read_unlock(&grsec_exec_file_lock);
++
++ if (retmode & GR_NOPTRACE) {
++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++ return 1;
++ }
++
++ if (retmode & GR_PTRACERD) {
++ switch (request) {
++ case PTRACE_POKETEXT:
++ case PTRACE_POKEDATA:
++ case PTRACE_POKEUSR:
++#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
++ case PTRACE_SETREGS:
++ case PTRACE_SETFPREGS:
++#endif
++#ifdef CONFIG_X86
++ case PTRACE_SETFPXREGS:
++#endif
++#ifdef CONFIG_ALTIVEC
++ case PTRACE_SETVRREGS:
++#endif
++ return 1;
++ default:
++ return 0;
++ }
++ } else if (!(current->acl->mode & GR_POVERRIDE) &&
++ !(current->role->roletype & GR_ROLE_GOD) &&
++ (current->acl != task->acl)) {
++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++ return 1;
++ }
++
++ return 0;
++}
++
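++/* PROT_EXEC mmap/mprotect checks: is_writable_mmap() flags regular files
++   that the default or role root subject could write to while the task
++   itself is not marked writable; gr_acl_handle_mmap()/_mprotect() then
++   require GR_EXEC on the file, consult gr_tpe_allow(), and log denials and
++   audited successes. */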
++static int is_writable_mmap(const struct file *filp)
++{
++ struct task_struct *task = current;
++ struct acl_object_label *obj, *obj2;
++
++ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
++ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode)) {
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
++ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
++ task->role->root_label);
++ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
++ return 1;
++ }
++ }
++ return 0;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
++{
++ __u32 mode;
++
++ if (unlikely(!file || !(prot & PROT_EXEC)))
++ return 1;
++
++ if (is_writable_mmap(file))
++ return 0;
++
++ mode =
++ gr_search_file(file->f_path.dentry,
++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++ file->f_path.mnt);
++
++ if (!gr_tpe_allow(file))
++ return 0;
++
++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 0;
++ } else if (unlikely(!(mode & GR_EXEC))) {
++ return 0;
++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 1;
++ }
++
++ return 1;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++ __u32 mode;
++
++ if (unlikely(!file || !(prot & PROT_EXEC)))
++ return 1;
++
++ if (is_writable_mmap(file))
++ return 0;
++
++ mode =
++ gr_search_file(file->f_path.dentry,
++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++ file->f_path.mnt);
++
++ if (!gr_tpe_allow(file))
++ return 0;
++
++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 0;
++ } else if (unlikely(!(mode & GR_EXEC))) {
++ return 0;
++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 1;
++ }
++
++ return 1;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++ unsigned long runtime;
++ unsigned long cputime;
++ unsigned int wday, cday;
++ __u8 whr, chr;
++ __u8 wmin, cmin;
++ __u8 wsec, csec;
++ struct timespec timeval;
++
++ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
++ !(task->acl->mode & GR_PROCACCT)))
++ return;
++
++ do_posix_clock_monotonic_gettime(&timeval);
++ runtime = timeval.tv_sec - task->start_time.tv_sec;
++ wday = runtime / (3600 * 24);
++ runtime -= wday * (3600 * 24);
++ whr = runtime / 3600;
++ runtime -= whr * 3600;
++ wmin = runtime / 60;
++ runtime -= wmin * 60;
++ wsec = runtime;
++
++ cputime = (task->utime + task->stime) / HZ;
++ cday = cputime / (3600 * 24);
++ cputime -= cday * (3600 * 24);
++ chr = cputime / 3600;
++ cputime -= chr * 3600;
++ cmin = cputime / 60;
++ cputime -= cmin * 60;
++ csec = cputime;
++
++ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
++
++ return;
++}
++
++void gr_set_kernel_label(struct task_struct *task)
++{
++ if (gr_status & GR_READY) {
++ task->role = kernel_role;
++ task->acl = kernel_role->root_label;
++ }
++ return;
++}
++
++#ifdef CONFIG_TASKSTATS
++int gr_is_taskstats_denied(int pid)
++{
++ struct task_struct *task;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ const struct cred *cred;
++#endif
++ int ret = 0;
++
++ /* restrict taskstats viewing to un-chrooted root users
++ who have the 'view' subject flag if the RBAC system is enabled
++ */
++
++ read_lock(&tasklist_lock);
++ task = find_task_by_vpid(pid);
++ if (task) {
++ task_lock(task);
++#ifdef CONFIG_GRKERNSEC_CHROOT
++ if (proc_is_chrooted(task))
++ ret = -EACCES;
++#endif
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ cred = __task_cred(task);
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ if (cred->uid != 0)
++ ret = -EACCES;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
++ ret = -EACCES;
++#endif
++#endif
++ if (gr_status & GR_READY) {
++ if (!(task->acl->mode & GR_VIEW))
++ ret = -EACCES;
++ }
++
++ task_unlock(task);
++ } else
++ ret = -ENOENT;
++
++ read_unlock(&tasklist_lock);
++
++ return ret;
++}
++#endif
++
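++/* gr_acl_handle_filldir: decide whether a directory entry may be shown
++   during readdir.  The entry's inode is checked against the subject chain
++   for GR_FIND; if the directory's object has attached globbed objects, the
++   full child path is rebuilt and matched against them. */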
++int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
++{
++ struct task_struct *task = current;
++ struct dentry *dentry = file->f_path.dentry;
++ struct vfsmount *mnt = file->f_path.mnt;
++ struct acl_object_label *obj, *tmp;
++ struct acl_subject_label *subj;
++ unsigned int bufsize;
++ int is_not_root;
++ char *path;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 1;
++
++ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++ return 1;
++
++ /* ignore Eric Biederman */
++ if (IS_PRIVATE(dentry->d_inode))
++ return 1;
++
++ subj = task->acl;
++ do {
++ obj = lookup_acl_obj_label(ino, dentry->d_inode->i_sb->s_dev, subj);
++ if (obj != NULL)
++ return (obj->mode & GR_FIND) ? 1 : 0;
++ } while ((subj = subj->parent_subject));
++
++	/* this is purely an optimization, since we're looking up an object
++	   for the directory we're doing a readdir on;
++	   if it's possible for any globbed object to match the entry we're
++	   filling into the directory, then the object we find here will be
++	   an anchor point with attached globbed objects
++	*/
++ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
++ if (obj->globbed == NULL)
++ return (obj->mode & GR_FIND) ? 1 : 0;
++
++ is_not_root = ((obj->filename[0] == '/') &&
++ (obj->filename[1] == '\0')) ? 0 : 1;
++ bufsize = PAGE_SIZE - namelen - is_not_root;
++
++ /* check bufsize > PAGE_SIZE || bufsize == 0 */
++ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
++ return 1;
++
++ preempt_disable();
++ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++ bufsize);
++
++ bufsize = strlen(path);
++
++ /* if base is "/", don't append an additional slash */
++ if (is_not_root)
++ *(path + bufsize) = '/';
++ memcpy(path + bufsize + is_not_root, name, namelen);
++ *(path + bufsize + namelen + is_not_root) = '\0';
++
++ tmp = obj->globbed;
++ while (tmp) {
++ if (!glob_match(tmp->filename, path)) {
++ preempt_enable();
++ return (tmp->mode & GR_FIND) ? 1 : 0;
++ }
++ tmp = tmp->next;
++ }
++ preempt_enable();
++ return (obj->mode & GR_FIND) ? 1 : 0;
++}
++
++EXPORT_SYMBOL(gr_learn_resource);
++EXPORT_SYMBOL(gr_set_kernel_label);
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL(gr_check_user_change);
++EXPORT_SYMBOL(gr_check_group_change);
++#endif
++
+diff -urNp linux-2.6.31.7/grsecurity/gracl_cap.c linux-2.6.31.7/grsecurity/gracl_cap.c
+--- linux-2.6.31.7/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/gracl_cap.c 2009-12-08 17:39:44.226685094 -0500
+@@ -0,0 +1,131 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++static const char *captab_log[] = {
++ "CAP_CHOWN",
++ "CAP_DAC_OVERRIDE",
++ "CAP_DAC_READ_SEARCH",
++ "CAP_FOWNER",
++ "CAP_FSETID",
++ "CAP_KILL",
++ "CAP_SETGID",
++ "CAP_SETUID",
++ "CAP_SETPCAP",
++ "CAP_LINUX_IMMUTABLE",
++ "CAP_NET_BIND_SERVICE",
++ "CAP_NET_BROADCAST",
++ "CAP_NET_ADMIN",
++ "CAP_NET_RAW",
++ "CAP_IPC_LOCK",
++ "CAP_IPC_OWNER",
++ "CAP_SYS_MODULE",
++ "CAP_SYS_RAWIO",
++ "CAP_SYS_CHROOT",
++ "CAP_SYS_PTRACE",
++ "CAP_SYS_PACCT",
++ "CAP_SYS_ADMIN",
++ "CAP_SYS_BOOT",
++ "CAP_SYS_NICE",
++ "CAP_SYS_RESOURCE",
++ "CAP_SYS_TIME",
++ "CAP_SYS_TTY_CONFIG",
++ "CAP_MKNOD",
++ "CAP_LEASE",
++ "CAP_AUDIT_WRITE",
++ "CAP_AUDIT_CONTROL",
++ "CAP_SETFCAP",
++ "CAP_MAC_OVERRIDE",
++ "CAP_MAC_ADMIN"
++};
++
++EXPORT_SYMBOL(gr_is_capable);
++EXPORT_SYMBOL(gr_is_capable_nolog);
++
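++/* gr_is_capable: capability check against the subject hierarchy.  A
++   capability is treated as dropped when the innermost subject whose
++   cap_mask mentions it also lowers it; learn mode logs and grants the
++   capability instead, and denials of an otherwise effective capability are
++   logged using captab_log. */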
++int
++gr_is_capable(const int cap)
++{
++ struct task_struct *task = current;
++ const struct cred *cred = current_cred();
++ struct acl_subject_label *curracl;
++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ curracl = task->acl;
++
++ cap_drop = curracl->cap_lower;
++ cap_mask = curracl->cap_mask;
++
++ while ((curracl = curracl->parent_subject)) {
++		/* if the cap isn't specified in the current computed mask but is specified in the
++		   current level subject, and is lowered in the current level subject, then add
++		   it to the set of dropped capabilities;
++		   otherwise, add the current level subject's mask to the current computed mask
++		*/
++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
++ cap_raise(cap_mask, cap);
++ if (cap_raised(curracl->cap_lower, cap))
++ cap_raise(cap_drop, cap);
++ }
++ }
++
++ if (!cap_raised(cap_drop, cap))
++ return 1;
++
++ curracl = task->acl;
++
++ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
++ && cap_raised(cred->cap_effective, cap)) {
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
++ task->role->roletype, cred->uid,
++ cred->gid, task->exec_file ?
++ gr_to_filename(task->exec_file->f_path.dentry,
++ task->exec_file->f_path.mnt) : curracl->filename,
++ curracl->filename, 0UL,
++ 0UL, "", (unsigned long) cap, NIPQUAD(task->signal->curr_ip));
++ return 1;
++ }
++
++ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap))
++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
++ return 0;
++}
++
++int
++gr_is_capable_nolog(const int cap)
++{
++ struct acl_subject_label *curracl;
++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ curracl = current->acl;
++
++ cap_drop = curracl->cap_lower;
++ cap_mask = curracl->cap_mask;
++
++ while ((curracl = curracl->parent_subject)) {
++		/* if the cap isn't specified in the current computed mask but is specified in the
++		   current level subject, and is lowered in the current level subject, then add
++		   it to the set of dropped capabilities;
++		   otherwise, add the current level subject's mask to the current computed mask
++		*/
++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
++ cap_raise(cap_mask, cap);
++ if (cap_raised(curracl->cap_lower, cap))
++ cap_raise(cap_drop, cap);
++ }
++ }
++
++ if (!cap_raised(cap_drop, cap))
++ return 1;
++
++ return 0;
++}
++
+diff -urNp linux-2.6.31.7/grsecurity/gracl_fs.c linux-2.6.31.7/grsecurity/gracl_fs.c
+--- linux-2.6.31.7/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/gracl_fs.c 2009-12-08 17:39:44.227705011 -0500
+@@ -0,0 +1,424 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/stat.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
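++/* Filesystem hooks: each gr_acl_handle_* helper below translates a VFS
++   operation into a set of requested GR_* bits, runs it through
++   gr_search_file() or gr_check_create(), and logs audited successes and
++   unsuppressed denials before returning the granted bits (0 on denial). */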
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ __u32 mode;
++
++ if (unlikely(!dentry->d_inode))
++ return GR_FIND;
++
++ mode =
++ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
++
++ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
++ return mode;
++ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
++ return 0;
++ } else if (unlikely(!(mode & GR_FIND)))
++ return 0;
++
++ return GR_FIND;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ __u32 reqmode = GR_FIND;
++ __u32 mode;
++
++ if (unlikely(!dentry->d_inode))
++ return reqmode;
++
++ if (unlikely(fmode & O_APPEND))
++ reqmode |= GR_APPEND;
++ else if (unlikely(fmode & FMODE_WRITE))
++ reqmode |= GR_WRITE;
++ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
++ reqmode |= GR_READ;
++ if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
++ reqmode &= ~GR_READ;
++ mode =
++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++ mnt);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "");
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "");
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++ const struct dentry * p_dentry,
++ const struct vfsmount * p_mnt, const int fmode,
++ const int imode)
++{
++ __u32 reqmode = GR_WRITE | GR_CREATE;
++ __u32 mode;
++
++ if (unlikely(fmode & O_APPEND))
++ reqmode |= GR_APPEND;
++ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
++ reqmode |= GR_READ;
++ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
++ reqmode |= GR_SETID;
++
++ mode =
++ gr_check_create(dentry, p_dentry, p_mnt,
++ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "");
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "");
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ __u32 mode, reqmode = GR_FIND;
++
++ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
++ reqmode |= GR_EXEC;
++ if (fmode & S_IWOTH)
++ reqmode |= GR_WRITE;
++ if (fmode & S_IROTH)
++ reqmode |= GR_READ;
++
++ mode =
++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++ mnt);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : "",
++ reqmode & GR_EXEC ? " executing" : "");
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : "",
++ reqmode & GR_EXEC ? " executing" : "");
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
++{
++ __u32 mode;
++
++ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
++
++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
++ return mode;
++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
++ return 0;
++ } else if (unlikely((mode & (reqmode)) != (reqmode)))
++ return 0;
++
++ return (reqmode);
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++	return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE, GR_RMDIR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++	return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE, GR_UNLINK_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
++ mode_t mode)
++{
++ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
++ return 1;
++
++ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
++ GR_FCHMOD_ACL_MSG);
++ } else {
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
++ }
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
++ mode_t mode)
++{
++ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
++ GR_CHMOD_ACL_MSG);
++ } else {
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
++ }
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
++ GR_UNIXCONNECT_ACL_MSG);
++}
++
++/* hardlinks require at minimum create permission,
++ any additional privilege required is based on the
++ privilege of the file being linked to
++*/
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry,
++ const struct vfsmount * old_mnt, const char *to)
++{
++ __u32 mode;
++ __u32 needmode = GR_CREATE | GR_LINK;
++ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
++
++ mode =
++ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
++ old_mnt);
++
++ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
++ return mode;
++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
++ return 0;
++ } else if (unlikely((mode & needmode) != needmode))
++ return 0;
++
++ return 1;
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt, const char *from)
++{
++ __u32 needmode = GR_WRITE | GR_CREATE;
++ __u32 mode;
++
++ mode =
++ gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ GR_CREATE | GR_AUDIT_CREATE |
++ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
++
++ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
++ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
++ return mode;
++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
++ return 0;
++ } else if (unlikely((mode & needmode) != needmode))
++ return 0;
++
++ return (GR_WRITE | GR_CREATE);
++}
++
++static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
++{
++ __u32 mode;
++
++ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
++
++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
++ return mode;
++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
++ return 0;
++ } else if (unlikely((mode & (reqmode)) != (reqmode)))
++ return 0;
++
++ return (reqmode);
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const int mode)
++{
++ __u32 reqmode = GR_WRITE | GR_CREATE;
++ if (unlikely(mode & (S_ISUID | S_ISGID)))
++ reqmode |= GR_SETID;
++
++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++ reqmode, GR_MKNOD_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt)
++{
++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
++}
++
++#define RENAME_CHECK_SUCCESS(old, new) \
++ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
++ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
++
++int
++gr_acl_handle_rename(struct dentry *new_dentry,
++ struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ struct dentry *old_dentry,
++ struct inode *old_parent_inode,
++ struct vfsmount *old_mnt, const char *newname)
++{
++ __u32 comp1, comp2;
++ int error = 0;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ if (!new_dentry->d_inode) {
++ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
++ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
++ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
++ GR_DELETE | GR_AUDIT_DELETE |
++ GR_AUDIT_READ | GR_AUDIT_WRITE |
++ GR_SUPPRESS, old_mnt);
++ } else {
++ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
++ GR_CREATE | GR_DELETE |
++ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
++ GR_AUDIT_READ | GR_AUDIT_WRITE |
++ GR_SUPPRESS, parent_mnt);
++ comp2 =
++ gr_search_file(old_dentry,
++ GR_READ | GR_WRITE | GR_AUDIT_READ |
++ GR_DELETE | GR_AUDIT_DELETE |
++ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
++ }
++
++ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
++ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
++ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
++ && !(comp2 & GR_SUPPRESS)) {
++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
++ error = -EACCES;
++ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
++ error = -EACCES;
++
++ return error;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++ u16 id;
++ char *rolename;
++ struct file *exec_file;
++
++ if (unlikely(current->acl_sp_role && gr_acl_is_enabled())) {
++ id = current->acl_role_id;
++ rolename = current->role->rolename;
++ gr_set_acls(1);
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
++ }
++
++ write_lock(&grsec_exec_file_lock);
++ exec_file = current->exec_file;
++ current->exec_file = NULL;
++ write_unlock(&grsec_exec_file_lock);
++
++ if (exec_file)
++ fput(exec_file);
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ if (task != current && task->acl->mode & GR_PROTPROCFD)
++ return -EACCES;
++
++ return 0;
++}
+diff -urNp linux-2.6.31.7/grsecurity/gracl_ip.c linux-2.6.31.7/grsecurity/gracl_ip.c
+--- linux-2.6.31.7/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/gracl_ip.c 2009-12-08 17:39:44.227705011 -0500
+@@ -0,0 +1,340 @@
++#include <linux/kernel.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/udp.h>
++#include <linux/smp_lock.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++#define GR_BIND 0x01
++#define GR_CONNECT 0x02
++#define GR_INVERT 0x04
++#define GR_BINDOVERRIDE 0x08
++#define GR_CONNECTOVERRIDE 0x10
++
++static const char * gr_protocols[256] = {
++ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
++ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
++ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
++ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
++ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
++ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
++ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
++ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
++ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
++ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
++ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
++ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
++ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
++ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
++ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
++ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
++	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
++ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
++ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
++ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
++ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
++ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
++ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
++ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
++ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
++ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
++ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
++ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
++ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
++ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
++ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
++ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
++ };
++
++static const char * gr_socktypes[11] = {
++ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
++ "unknown:7", "unknown:8", "unknown:9", "packet"
++ };
++
++const char *
++gr_proto_to_name(unsigned char proto)
++{
++ return gr_protocols[proto];
++}
++
++const char *
++gr_socktype_to_name(unsigned char type)
++{
++ return gr_socktypes[type];
++}
++
++int
++gr_search_socket(const int domain, const int type, const int protocol)
++{
++ struct acl_subject_label *curr;
++ const struct cred *cred = current_cred();
++
++ if (unlikely(!gr_acl_is_enabled()))
++ goto exit;
++
++ if ((domain < 0) || (type < 0) || (protocol < 0) || (domain != PF_INET)
++ || (domain >= NPROTO) || (type >= SOCK_MAX) || (protocol > 255))
++ goto exit; // let the kernel handle it
++
++ curr = current->acl;
++
++ if (!curr->ips)
++ goto exit;
++
++ if ((curr->ip_type & (1 << type)) &&
++ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
++ goto exit;
++
++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++		/* we don't place acls on raw sockets, and sometimes
++ dgram/ip sockets are opened for ioctl and not
++ bind/connect, so we'll fake a bind learn log */
++ if (type == SOCK_RAW || type == SOCK_PACKET) {
++ __u32 fakeip = 0;
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, cred->uid,
++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry,
++ current->exec_file->f_path.mnt) :
++ curr->filename, curr->filename,
++ NIPQUAD(fakeip), 0, type,
++ protocol, GR_CONNECT,
++				       NIPQUAD(current->signal->curr_ip));
++ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
++ __u32 fakeip = 0;
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, cred->uid,
++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry,
++ current->exec_file->f_path.mnt) :
++ curr->filename, curr->filename,
++ NIPQUAD(fakeip), 0, type,
++ protocol, GR_BIND, NIPQUAD(current->signal->curr_ip));
++ }
++ /* we'll log when they use connect or bind */
++ goto exit;
++ }
++
++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, "inet",
++ gr_socktype_to_name(type), gr_proto_to_name(protocol));
++
++ return 0;
++ exit:
++ return 1;
++}
++
++int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
++{
++ if ((ip->mode & mode) &&
++ (ip_port >= ip->low) &&
++ (ip_port <= ip->high) &&
++ ((ntohl(ip_addr) & our_netmask) ==
++ (ntohl(our_addr) & our_netmask))
++ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
++ && (ip->type & (1 << type))) {
++ if (ip->mode & GR_INVERT)
++ return 2; // specifically denied
++ else
++ return 1; // allowed
++ }
++
++ return 0; // not specifically allowed, may continue parsing
++}
++
++static int
++gr_search_connectbind(const int full_mode, struct sock *sk,
++ struct sockaddr_in *addr, const int type)
++{
++ char iface[IFNAMSIZ] = {0};
++ struct acl_subject_label *curr;
++ struct acl_ip_label *ip;
++ struct inet_sock *isk;
++ struct net_device *dev;
++ struct in_device *idev;
++ unsigned long i;
++ int ret;
++ int mode = full_mode & (GR_BIND | GR_CONNECT);
++ __u32 ip_addr = 0;
++ __u32 our_addr;
++ __u32 our_netmask;
++ char *p;
++ __u16 ip_port = 0;
++ const struct cred *cred = current_cred();
++
++ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
++ return 0;
++
++ curr = current->acl;
++ isk = inet_sk(sk);
++
++	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
++ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
++ addr->sin_addr.s_addr = curr->inaddr_any_override;
++ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
++ struct sockaddr_in saddr;
++ int err;
++
++ saddr.sin_family = AF_INET;
++ saddr.sin_addr.s_addr = curr->inaddr_any_override;
++ saddr.sin_port = isk->sport;
++
++ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
++ if (err)
++ return err;
++
++ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
++ if (err)
++ return err;
++ }
++
++ if (!curr->ips)
++ return 0;
++
++ ip_addr = addr->sin_addr.s_addr;
++ ip_port = ntohs(addr->sin_port);
++
++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, cred->uid,
++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry,
++ current->exec_file->f_path.mnt) :
++ curr->filename, curr->filename,
++ NIPQUAD(ip_addr), ip_port, type,
++ sk->sk_protocol, mode, NIPQUAD(current->signal->curr_ip));
++ return 0;
++ }
++
++ for (i = 0; i < curr->ip_num; i++) {
++ ip = *(curr->ips + i);
++ if (ip->iface != NULL) {
++ strncpy(iface, ip->iface, IFNAMSIZ - 1);
++ p = strchr(iface, ':');
++ if (p != NULL)
++ *p = '\0';
++ dev = dev_get_by_name(sock_net(sk), iface);
++ if (dev == NULL)
++ continue;
++ idev = in_dev_get(dev);
++ if (idev == NULL) {
++ dev_put(dev);
++ continue;
++ }
++ rcu_read_lock();
++ for_ifa(idev) {
++ if (!strcmp(ip->iface, ifa->ifa_label)) {
++ our_addr = ifa->ifa_address;
++ our_netmask = 0xffffffff;
++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
++ if (ret == 1) {
++ rcu_read_unlock();
++ in_dev_put(idev);
++ dev_put(dev);
++ return 0;
++ } else if (ret == 2) {
++ rcu_read_unlock();
++ in_dev_put(idev);
++ dev_put(dev);
++ goto denied;
++ }
++ }
++ } endfor_ifa(idev);
++ rcu_read_unlock();
++ in_dev_put(idev);
++ dev_put(dev);
++ } else {
++ our_addr = ip->addr;
++ our_netmask = ip->netmask;
++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
++ if (ret == 1)
++ return 0;
++ else if (ret == 2)
++ goto denied;
++ }
++ }
++
++denied:
++ if (mode == GR_BIND)
++ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, NIPQUAD(ip_addr), ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
++ else if (mode == GR_CONNECT)
++ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, NIPQUAD(ip_addr), ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
++
++ return -EACCES;
++}
++
++int
++gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
++{
++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
++}
++
++int
++gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
++{
++ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
++}
++
++int gr_search_listen(struct socket *sock)
++{
++ struct sock *sk = sock->sk;
++ struct sockaddr_in addr;
++
++ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
++ addr.sin_port = inet_sk(sk)->sport;
++
++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
++}
++
++int gr_search_accept(struct socket *sock)
++{
++ struct sock *sk = sock->sk;
++ struct sockaddr_in addr;
++
++ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
++ addr.sin_port = inet_sk(sk)->sport;
++
++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
++}
++
++int
++gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
++{
++ if (addr)
++ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
++ else {
++ struct sockaddr_in sin;
++ const struct inet_sock *inet = inet_sk(sk);
++
++ sin.sin_addr.s_addr = inet->daddr;
++ sin.sin_port = inet->dport;
++
++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
++ }
++}
++
++int
++gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
++{
++ struct sockaddr_in sin;
++
++ if (unlikely(skb->len < sizeof (struct udphdr)))
++ return 0; // skip this packet
++
++ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
++ sin.sin_port = udp_hdr(skb)->source;
++
++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
++}
+diff -urNp linux-2.6.31.7/grsecurity/gracl_learn.c linux-2.6.31.7/grsecurity/gracl_learn.c
+--- linux-2.6.31.7/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/gracl_learn.c 2009-12-08 17:39:44.227705011 -0500
+@@ -0,0 +1,211 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/poll.h>
++#include <linux/smp_lock.h>
++#include <linux/string.h>
++#include <linux/file.h>
++#include <linux/types.h>
++#include <linux/vmalloc.h>
++#include <linux/grinternal.h>
++
++extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
++ size_t count, loff_t *ppos);
++extern int gr_acl_is_enabled(void);
++
++static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
++static int gr_learn_attached;
++
++/* use a 512k buffer */
++#define LEARN_BUFFER_SIZE (512 * 1024)
++
++static DEFINE_SPINLOCK(gr_learn_lock);
++static DECLARE_MUTEX(gr_learn_user_sem);
++
++/* we need to maintain two buffers, so that the kernel context of grlearn
++ uses a semaphore around the userspace copying, and the other kernel contexts
++ use a spinlock when copying into the buffer, since they cannot sleep
++*/
++static char *learn_buffer;
++static char *learn_buffer_user;
++static int learn_buffer_len;
++static int learn_buffer_user_len;
++
++static ssize_t
++read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
++{
++ DECLARE_WAITQUEUE(wait, current);
++ ssize_t retval = 0;
++
++ add_wait_queue(&learn_wait, &wait);
++ set_current_state(TASK_INTERRUPTIBLE);
++ do {
++ down(&gr_learn_user_sem);
++ spin_lock(&gr_learn_lock);
++ if (learn_buffer_len)
++ break;
++ spin_unlock(&gr_learn_lock);
++ up(&gr_learn_user_sem);
++ if (file->f_flags & O_NONBLOCK) {
++ retval = -EAGAIN;
++ goto out;
++ }
++ if (signal_pending(current)) {
++ retval = -ERESTARTSYS;
++ goto out;
++ }
++
++ schedule();
++ } while (1);
++
++ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
++ learn_buffer_user_len = learn_buffer_len;
++ retval = learn_buffer_len;
++ learn_buffer_len = 0;
++
++ spin_unlock(&gr_learn_lock);
++
++ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
++ retval = -EFAULT;
++
++ up(&gr_learn_user_sem);
++out:
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&learn_wait, &wait);
++ return retval;
++}
++
++static unsigned int
++poll_learn(struct file * file, poll_table * wait)
++{
++ poll_wait(file, &learn_wait, wait);
++
++ if (learn_buffer_len)
++ return (POLLIN | POLLRDNORM);
++
++ return 0;
++}
++
++void
++gr_clear_learn_entries(void)
++{
++ char *tmp;
++
++ down(&gr_learn_user_sem);
++ if (learn_buffer != NULL) {
++ spin_lock(&gr_learn_lock);
++ tmp = learn_buffer;
++ learn_buffer = NULL;
++ spin_unlock(&gr_learn_lock);
++		vfree(tmp);
++ }
++ if (learn_buffer_user != NULL) {
++ vfree(learn_buffer_user);
++ learn_buffer_user = NULL;
++ }
++ learn_buffer_len = 0;
++ up(&gr_learn_user_sem);
++
++ return;
++}
++
++void
++gr_add_learn_entry(const char *fmt, ...)
++{
++ va_list args;
++ unsigned int len;
++
++ if (!gr_learn_attached)
++ return;
++
++ spin_lock(&gr_learn_lock);
++
++ /* leave a gap at the end so we know when it's "full" but don't have to
++ compute the exact length of the string we're trying to append
++ */
++ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
++ spin_unlock(&gr_learn_lock);
++ wake_up_interruptible(&learn_wait);
++ return;
++ }
++ if (learn_buffer == NULL) {
++ spin_unlock(&gr_learn_lock);
++ return;
++ }
++
++ va_start(args, fmt);
++ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
++ va_end(args);
++
++ learn_buffer_len += len + 1;
++
++ spin_unlock(&gr_learn_lock);
++ wake_up_interruptible(&learn_wait);
++
++ return;
++}
++
++static int
++open_learn(struct inode *inode, struct file *file)
++{
++ if (file->f_mode & FMODE_READ && gr_learn_attached)
++ return -EBUSY;
++ if (file->f_mode & FMODE_READ) {
++ int retval = 0;
++ down(&gr_learn_user_sem);
++ if (learn_buffer == NULL)
++ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
++ if (learn_buffer_user == NULL)
++ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
++ if (learn_buffer == NULL) {
++ retval = -ENOMEM;
++ goto out_error;
++ }
++ if (learn_buffer_user == NULL) {
++ retval = -ENOMEM;
++ goto out_error;
++ }
++ learn_buffer_len = 0;
++ learn_buffer_user_len = 0;
++ gr_learn_attached = 1;
++out_error:
++ up(&gr_learn_user_sem);
++ return retval;
++ }
++ return 0;
++}
++
++static int
++close_learn(struct inode *inode, struct file *file)
++{
++ char *tmp;
++
++ if (file->f_mode & FMODE_READ) {
++ down(&gr_learn_user_sem);
++ if (learn_buffer != NULL) {
++ spin_lock(&gr_learn_lock);
++ tmp = learn_buffer;
++ learn_buffer = NULL;
++ spin_unlock(&gr_learn_lock);
++ vfree(tmp);
++ }
++ if (learn_buffer_user != NULL) {
++ vfree(learn_buffer_user);
++ learn_buffer_user = NULL;
++ }
++ learn_buffer_len = 0;
++ learn_buffer_user_len = 0;
++ gr_learn_attached = 0;
++ up(&gr_learn_user_sem);
++ }
++
++ return 0;
++}
++
++const struct file_operations grsec_fops = {
++ .read = read_learn,
++ .write = write_grsec_handler,
++ .open = open_learn,
++ .release = close_learn,
++ .poll = poll_learn,
++};
+diff -urNp linux-2.6.31.7/grsecurity/gracl_res.c linux-2.6.31.7/grsecurity/gracl_res.c
+--- linux-2.6.31.7/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/gracl_res.c 2009-12-08 17:39:44.227705011 -0500
+@@ -0,0 +1,58 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grinternal.h>
++
++static const char *restab_log[] = {
++ [RLIMIT_CPU] = "RLIMIT_CPU",
++ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
++ [RLIMIT_DATA] = "RLIMIT_DATA",
++ [RLIMIT_STACK] = "RLIMIT_STACK",
++ [RLIMIT_CORE] = "RLIMIT_CORE",
++ [RLIMIT_RSS] = "RLIMIT_RSS",
++ [RLIMIT_NPROC] = "RLIMIT_NPROC",
++ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
++ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
++ [RLIMIT_AS] = "RLIMIT_AS",
++ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
++ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
++ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
++ [RLIMIT_NICE] = "RLIMIT_NICE",
++ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
++ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
++ [GR_CRASH_RES] = "RLIMIT_CRASH"
++};
++
++void
++gr_log_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ const struct cred *cred = __task_cred(task);
++
++ if (res == RLIMIT_NPROC &&
++ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
++ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
++ return;
++ else if (res == RLIMIT_MEMLOCK &&
++ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
++ return;
++ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
++ return;
++
++ if (!gr_acl_is_enabled() && !grsec_resource_logging)
++ return;
++
++ // not yet supported resource
++ if (!restab_log[res])
++ return;
++
++ preempt_disable();
++
++ if (unlikely(((gt && wanted > task->signal->rlim[res].rlim_cur) ||
++ (!gt && wanted >= task->signal->rlim[res].rlim_cur)) &&
++ task->signal->rlim[res].rlim_cur != RLIM_INFINITY))
++ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], task->signal->rlim[res].rlim_cur);
++ preempt_enable_no_resched();
++
++ return;
++}
+diff -urNp linux-2.6.31.7/grsecurity/gracl_segv.c linux-2.6.31.7/grsecurity/gracl_segv.c
+--- linux-2.6.31.7/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/gracl_segv.c 2009-12-08 17:39:44.227705011 -0500
+@@ -0,0 +1,307 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/smp_lock.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/timer.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++static struct crash_uid *uid_set;
++static unsigned short uid_used;
++static DEFINE_SPINLOCK(gr_uid_lock);
++extern rwlock_t gr_inode_lock;
++extern struct acl_subject_label *
++ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
++ struct acl_role_label *role);
++extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
++
++int
++gr_init_uidset(void)
++{
++ uid_set =
++ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
++ uid_used = 0;
++
++ return uid_set ? 1 : 0;
++}
++
++void
++gr_free_uidset(void)
++{
++ if (uid_set)
++ kfree(uid_set);
++
++ return;
++}
++
++int
++gr_find_uid(const uid_t uid)
++{
++ struct crash_uid *tmp = uid_set;
++ uid_t buid;
++ int low = 0, high = uid_used - 1, mid;
++
++ while (high >= low) {
++ mid = (low + high) >> 1;
++ buid = tmp[mid].uid;
++ if (buid == uid)
++ return mid;
++ if (buid > uid)
++ high = mid - 1;
++ if (buid < uid)
++ low = mid + 1;
++ }
++
++ return -1;
++}
++
++static __inline__ void
++gr_insertsort(void)
++{
++ unsigned short i, j;
++ struct crash_uid index;
++
++ for (i = 1; i < uid_used; i++) {
++ index = uid_set[i];
++ j = i;
++ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
++ uid_set[j] = uid_set[j - 1];
++ j--;
++ }
++ uid_set[j] = index;
++ }
++
++ return;
++}
++
++static __inline__ void
++gr_insert_uid(const uid_t uid, const unsigned long expires)
++{
++ int loc;
++
++ if (uid_used == GR_UIDTABLE_MAX)
++ return;
++
++ loc = gr_find_uid(uid);
++
++ if (loc >= 0) {
++ uid_set[loc].expires = expires;
++ return;
++ }
++
++ uid_set[uid_used].uid = uid;
++ uid_set[uid_used].expires = expires;
++ uid_used++;
++
++ gr_insertsort();
++
++ return;
++}
++
++void
++gr_remove_uid(const unsigned short loc)
++{
++ unsigned short i;
++
++ for (i = loc + 1; i < uid_used; i++)
++ uid_set[i - 1] = uid_set[i];
++
++ uid_used--;
++
++ return;
++}
++
++int
++gr_check_crash_uid(const uid_t uid)
++{
++ int loc;
++ int ret = 0;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ spin_lock(&gr_uid_lock);
++ loc = gr_find_uid(uid);
++
++ if (loc < 0)
++ goto out_unlock;
++
++ if (time_before_eq(uid_set[loc].expires, get_seconds()))
++ gr_remove_uid(loc);
++ else
++ ret = 1;
++
++out_unlock:
++ spin_unlock(&gr_uid_lock);
++ return ret;
++}
++
++static __inline__ int
++proc_is_setxid(const struct cred *cred)
++{
++ if (cred->uid != cred->euid || cred->uid != cred->suid ||
++ cred->uid != cred->fsuid)
++ return 1;
++ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
++ cred->gid != cred->fsgid)
++ return 1;
++
++ return 0;
++}
++static __inline__ int
++gr_fake_force_sig(int sig, struct task_struct *t)
++{
++ unsigned long int flags;
++ int ret, blocked, ignored;
++ struct k_sigaction *action;
++
++ spin_lock_irqsave(&t->sighand->siglock, flags);
++ action = &t->sighand->action[sig-1];
++ ignored = action->sa.sa_handler == SIG_IGN;
++ blocked = sigismember(&t->blocked, sig);
++ if (blocked || ignored) {
++ action->sa.sa_handler = SIG_DFL;
++ if (blocked) {
++ sigdelset(&t->blocked, sig);
++ recalc_sigpending_and_wake(t);
++ }
++ }
++ if (action->sa.sa_handler == SIG_DFL)
++ t->signal->flags &= ~SIGNAL_UNKILLABLE;
++ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
++
++ spin_unlock_irqrestore(&t->sighand->siglock, flags);
++
++ return ret;
++}
++
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++ struct acl_subject_label *curr;
++ struct acl_subject_label *curr2;
++ struct task_struct *tsk, *tsk2;
++ const struct cred *cred = __task_cred(task);
++ const struct cred *cred2;
++
++ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
++ return;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return;
++
++ curr = task->acl;
++
++ if (!(curr->resmask & (1 << GR_CRASH_RES)))
++ return;
++
++ if (time_before_eq(curr->expires, get_seconds())) {
++ curr->expires = 0;
++ curr->crashes = 0;
++ }
++
++ curr->crashes++;
++
++ if (!curr->expires)
++ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
++
++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++ time_after(curr->expires, get_seconds())) {
++ if (cred->uid && proc_is_setxid(cred)) {
++ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
++ spin_lock(&gr_uid_lock);
++ gr_insert_uid(cred->uid, curr->expires);
++ spin_unlock(&gr_uid_lock);
++ curr->expires = 0;
++ curr->crashes = 0;
++ read_lock(&tasklist_lock);
++ do_each_thread(tsk2, tsk) {
++ cred2 = __task_cred(tsk);
++ if (tsk != task && cred2->uid == cred->uid)
++ gr_fake_force_sig(SIGKILL, tsk);
++ } while_each_thread(tsk2, tsk);
++ read_unlock(&tasklist_lock);
++ } else {
++ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
++ read_lock(&tasklist_lock);
++ do_each_thread(tsk2, tsk) {
++ if (likely(tsk != task)) {
++ curr2 = tsk->acl;
++
++ if (curr2->device == curr->device &&
++ curr2->inode == curr->inode)
++ gr_fake_force_sig(SIGKILL, tsk);
++ }
++ } while_each_thread(tsk2, tsk);
++ read_unlock(&tasklist_lock);
++ }
++ }
++
++ return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++ struct acl_subject_label *curr;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ read_lock(&gr_inode_lock);
++ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
++ filp->f_path.dentry->d_inode->i_sb->s_dev,
++ current->role);
++ read_unlock(&gr_inode_lock);
++
++ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
++ (!curr->crashes && !curr->expires))
++ return 0;
++
++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++ time_after(curr->expires, get_seconds()))
++ return 1;
++ else if (time_before_eq(curr->expires, get_seconds())) {
++ curr->crashes = 0;
++ curr->expires = 0;
++ }
++
++ return 0;
++}
++
++void
++gr_handle_alertkill(struct task_struct *task)
++{
++ struct acl_subject_label *curracl;
++ __u32 curr_ip;
++ struct task_struct *p, *p2;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return;
++
++ curracl = task->acl;
++ curr_ip = task->signal->curr_ip;
++
++ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
++ read_lock(&tasklist_lock);
++ do_each_thread(p2, p) {
++ if (p->signal->curr_ip == curr_ip)
++ gr_fake_force_sig(SIGKILL, p);
++ } while_each_thread(p2, p);
++ read_unlock(&tasklist_lock);
++ } else if (curracl->mode & GR_KILLPROC)
++ gr_fake_force_sig(SIGKILL, task);
++
++ return;
++}
+diff -urNp linux-2.6.31.7/grsecurity/gracl_shm.c linux-2.6.31.7/grsecurity/gracl_shm.c
+--- linux-2.6.31.7/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/gracl_shm.c 2009-12-08 17:39:44.228758139 -0500
+@@ -0,0 +1,37 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/ipc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid, const int shmid)
++{
++ struct task_struct *task;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ read_lock(&tasklist_lock);
++
++ task = find_task_by_vpid(shm_cprid);
++
++ if (unlikely(!task))
++ task = find_task_by_vpid(shm_lapid);
++
++ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
++ (task->pid == shm_lapid)) &&
++ (task->acl->mode & GR_PROTSHM) &&
++ (task->acl != current->acl))) {
++ read_unlock(&tasklist_lock);
++ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
++ return 0;
++ }
++ read_unlock(&tasklist_lock);
++
++ return 1;
++}
+diff -urNp linux-2.6.31.7/grsecurity/grsec_chdir.c linux-2.6.31.7/grsecurity/grsec_chdir.c
+--- linux-2.6.31.7/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_chdir.c 2009-12-08 17:39:44.228758139 -0500
+@@ -0,0 +1,19 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ if ((grsec_enable_chdir && grsec_enable_group &&
++ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
++ !grsec_enable_group)) {
++ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
++ }
++#endif
++ return;
++}
+diff -urNp linux-2.6.31.7/grsecurity/grsec_chroot.c linux-2.6.31.7/grsecurity/grsec_chroot.c
+--- linux-2.6.31.7/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_chroot.c 2009-12-08 17:39:44.228758139 -0500
+@@ -0,0 +1,348 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/mount.h>
++#include <linux/types.h>
++#include <linux/pid_namespace.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_chroot_unix(const pid_t pid)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ struct pid *spid = NULL;
++
++ if (unlikely(!grsec_enable_chroot_unix))
++ return 1;
++
++ if (likely(!proc_is_chrooted(current)))
++ return 1;
++
++ read_lock(&tasklist_lock);
++
++ spid = find_vpid(pid);
++ if (spid) {
++ struct task_struct *p;
++ p = pid_task(spid, PIDTYPE_PID);
++ task_lock(p);
++ if (unlikely(!have_same_root(current, p))) {
++ task_unlock(p);
++ read_unlock(&tasklist_lock);
++ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
++ return 0;
++ }
++ task_unlock(p);
++ }
++ read_unlock(&tasklist_lock);
++#endif
++ return 1;
++}
++
++int
++gr_handle_chroot_nice(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
++ && proc_is_chrooted(current)) {
++ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_rawio(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
++ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
++ return 1;
++#endif
++ return 0;
++}
++
++int
++gr_pid_is_chrooted(struct task_struct *p)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
++ return 0;
++
++ task_lock(p);
++ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
++ !have_same_root(current, p)) {
++ task_unlock(p);
++ return 1;
++ }
++ task_unlock(p);
++#endif
++ return 0;
++}
++
++EXPORT_SYMBOL(gr_pid_is_chrooted);
++
++#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
++int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
++{
++ struct dentry *dentry = (struct dentry *)u_dentry;
++ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
++ struct dentry *realroot;
++ struct vfsmount *realrootmnt;
++ struct dentry *currentroot;
++ struct vfsmount *currentmnt;
++ struct task_struct *reaper = &init_task;
++ int ret = 1;
++
++ read_lock(&reaper->fs->lock);
++ realrootmnt = mntget(reaper->fs->root.mnt);
++ realroot = dget(reaper->fs->root.dentry);
++ read_unlock(&reaper->fs->lock);
++
++ read_lock(&current->fs->lock);
++ currentmnt = mntget(current->fs->root.mnt);
++ currentroot = dget(current->fs->root.dentry);
++ read_unlock(&current->fs->lock);
++
++ spin_lock(&dcache_lock);
++ for (;;) {
++ if (unlikely((dentry == realroot && mnt == realrootmnt)
++ || (dentry == currentroot && mnt == currentmnt)))
++ break;
++ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
++ if (mnt->mnt_parent == mnt)
++ break;
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++ dentry = dentry->d_parent;
++ }
++ spin_unlock(&dcache_lock);
++
++ dput(currentroot);
++ mntput(currentmnt);
++
++ /* access is outside of chroot */
++ if (dentry == realroot && mnt == realrootmnt)
++ ret = 0;
++
++ dput(realroot);
++ mntput(realrootmnt);
++ return ret;
++}
++#endif
++
++int
++gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ if (!grsec_enable_chroot_fchdir)
++ return 1;
++
++ if (!proc_is_chrooted(current))
++ return 1;
++ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
++ return 0;
++ }
++#endif
++ return 1;
++}
++
++int
++gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ struct pid *pid = NULL;
++ time_t starttime;
++
++ if (unlikely(!grsec_enable_chroot_shmat))
++ return 1;
++
++ if (likely(!proc_is_chrooted(current)))
++ return 1;
++
++ read_lock(&tasklist_lock);
++
++ pid = find_vpid(shm_cprid);
++ if (pid) {
++ struct task_struct *p;
++ p = pid_task(pid, PIDTYPE_PID);
++ task_lock(p);
++ starttime = p->start_time.tv_sec;
++ if (unlikely(!have_same_root(current, p) &&
++ time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
++ task_unlock(p);
++ read_unlock(&tasklist_lock);
++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
++ return 0;
++ }
++ task_unlock(p);
++ } else {
++ pid = find_vpid(shm_lapid);
++ if (pid) {
++ struct task_struct *p;
++ p = pid_task(pid, PIDTYPE_PID);
++ task_lock(p);
++ if (unlikely(!have_same_root(current, p))) {
++ task_unlock(p);
++ read_unlock(&tasklist_lock);
++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
++ return 0;
++ }
++ task_unlock(p);
++ }
++ }
++
++ read_unlock(&tasklist_lock);
++#endif
++ return 1;
++}
++
++void
++gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
++ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_mknod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
++ proc_is_chrooted(current)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_mount(const struct dentry *dentry,
++ const struct vfsmount *mnt, const char *dev_name)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
++ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_pivot(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
++ !gr_is_outside_chroot(dentry, mnt)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_caps(struct path *path)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
++ (init_task.fs->root.dentry != path->dentry) &&
++ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
++
++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
++ const struct cred *old = current_cred();
++ struct cred *new = prepare_creds();
++ if (new == NULL)
++ return 1;
++
++ new->cap_permitted = cap_drop(old->cap_permitted,
++ chroot_caps);
++ new->cap_inheritable = cap_drop(old->cap_inheritable,
++ chroot_caps);
++ new->cap_effective = cap_drop(old->cap_effective,
++ chroot_caps);
++
++ commit_creds(new);
++
++ return 0;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_sysctl(const int op)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
++ && (op & MAY_WRITE))
++ return -EACCES;
++#endif
++ return 0;
++}
++
++void
++gr_handle_chroot_chdir(struct path *path)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ if (grsec_enable_chroot_chdir)
++ set_fs_pwd(current->fs, path);
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ if (grsec_enable_chroot_chmod &&
++ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
++ proc_is_chrooted(current)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL(gr_handle_chroot_caps);
++#endif
+diff -urNp linux-2.6.31.7/grsecurity/grsec_disabled.c linux-2.6.31.7/grsecurity/grsec_disabled.c
+--- linux-2.6.31.7/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_disabled.c 2009-12-08 17:39:44.228758139 -0500
+@@ -0,0 +1,426 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/kdev_t.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/sysctl.h>
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++void
++pax_set_initial_flags(struct linux_binprm *bprm)
++{
++ return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++__u32
++gr_handle_sysctl(const struct ctl_table * table, const int op)
++{
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_TASKSTATS
++int gr_is_taskstats_denied(int pid)
++{
++ return 0;
++}
++#endif
++
++int
++gr_acl_is_enabled(void)
++{
++ return 0;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++ return 0;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++ return;
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++ return 0;
++}
++
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++ return 0;
++}
++
++void
++gr_learn_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ return;
++}
++
++int
++gr_set_acls(const int type)
++{
++ return 0;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *tsk)
++{
++ return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++ return 0;
++}
++
++void
++gr_copy_label(struct task_struct *tsk)
++{
++ return;
++}
++
++void
++gr_set_pax_flags(struct task_struct *task)
++{
++ return;
++}
++
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
++ const int unsafe_share)
++{
++ return 0;
++}
++
++void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++ return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return;
++}
++
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++ return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++ return 0;
++}
++
++int
++gr_check_crash_uid(const uid_t uid)
++{
++ return 0;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace)
++{
++ return;
++}
++
++int
++gr_search_socket(const int family, const int type, const int protocol)
++{
++ return 1;
++}
++
++int
++gr_search_connectbind(const int mode, const struct socket *sock,
++ const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++int
++gr_is_capable(const int cap)
++{
++ return 1;
++}
++
++int
++gr_is_capable_nolog(const int cap)
++{
++ return 1;
++}
++
++void
++gr_handle_alertkill(struct task_struct *task)
++{
++ return;
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
++ unsigned int *vm_flags)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry,
++ const struct vfsmount * mnt, const int fmode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
++ mode_t mode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
++ mode_t mode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++void
++grsecurity_init(void)
++{
++ return;
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const int mode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt, const char *from)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry,
++ const struct vfsmount * old_mnt, const char *to)
++{
++ return 1;
++}
++
++int
++gr_acl_handle_rename(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct inode *old_parent_inode,
++ const struct vfsmount *old_mnt, const char *newname)
++{
++ return 0;
++}
++
++int
++gr_acl_handle_filldir(const struct file *file, const char *name,
++ const int namelen, const ino_t ino)
++{
++ return 1;
++}
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid, const int shmid)
++{
++ return 1;
++}
++
++int
++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++int
++gr_search_accept(const struct socket *sock)
++{
++ return 0;
++}
++
++int
++gr_search_listen(const struct socket *sock)
++{
++ return 0;
++}
++
++int
++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++ const struct dentry * p_dentry,
++ const struct vfsmount * p_mnt, const int fmode,
++ const int imode)
++{
++ return 1;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++ return;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++ return 1;
++}
++
++void
++gr_set_role_label(const uid_t uid, const gid_t gid)
++{
++ return;
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++ return 0;
++}
++
++int
++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
++{
++ return 0;
++}
++
++int
++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++void
++gr_set_kernel_label(struct task_struct *task)
++{
++ return;
++}
++
++int
++gr_check_user_change(int real, int effective, int fs)
++{
++ return 0;
++}
++
++int
++gr_check_group_change(int real, int effective, int fs)
++{
++ return 0;
++}
++
++
++EXPORT_SYMBOL(gr_is_capable);
++EXPORT_SYMBOL(gr_is_capable_nolog);
++EXPORT_SYMBOL(gr_learn_resource);
++EXPORT_SYMBOL(gr_set_kernel_label);
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL(gr_check_user_change);
++EXPORT_SYMBOL(gr_check_group_change);
++#endif
+diff -urNp linux-2.6.31.7/grsecurity/grsec_exec.c linux-2.6.31.7/grsecurity/grsec_exec.c
+--- linux-2.6.31.7/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_exec.c 2009-12-08 17:39:44.228758139 -0500
+@@ -0,0 +1,89 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/binfmts.h>
++#include <linux/smp_lock.h>
++#include <linux/fs.h>
++#include <linux/types.h>
++#include <linux/grdefs.h>
++#include <linux/grinternal.h>
++#include <linux/capability.h>
++
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++static char gr_exec_arg_buf[132];
++static DECLARE_MUTEX(gr_exec_arg_sem);
++#endif
++
++int
++gr_handle_nproc(void)
++{
++#ifdef CONFIG_GRKERNSEC_EXECVE
++ const struct cred *cred = current_cred();
++ if (grsec_enable_execve && cred->user &&
++ (atomic_read(&cred->user->processes) >
++ current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
++ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
++ return -EAGAIN;
++ }
++#endif
++ return 0;
++}
++
++void
++gr_handle_exec_args(struct linux_binprm *bprm, const char __user *__user *argv)
++{
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ char *grarg = gr_exec_arg_buf;
++ unsigned int i, x, execlen = 0;
++ char c;
++
++ if (!((grsec_enable_execlog && grsec_enable_group &&
++ in_group_p(grsec_audit_gid))
++ || (grsec_enable_execlog && !grsec_enable_group)))
++ return;
++
++ down(&gr_exec_arg_sem);
++ memset(grarg, 0, sizeof(gr_exec_arg_buf));
++
++ if (unlikely(argv == NULL))
++ goto log;
++
++ for (i = 0; i < bprm->argc && execlen < 128; i++) {
++ const char __user *p;
++ unsigned int len;
++
++ if (copy_from_user(&p, argv + i, sizeof(p)))
++ goto log;
++ if (!p)
++ goto log;
++ len = strnlen_user(p, 128 - execlen);
++ if (len > 128 - execlen)
++ len = 128 - execlen;
++ else if (len > 0)
++ len--;
++ if (copy_from_user(grarg + execlen, p, len))
++ goto log;
++
++ /* rewrite unprintable characters */
++ for (x = 0; x < len; x++) {
++ c = *(grarg + execlen + x);
++ if (c < 32 || c > 126)
++ *(grarg + execlen + x) = ' ';
++ }
++
++ execlen += len;
++ *(grarg + execlen) = ' ';
++ *(grarg + execlen + 1) = '\0';
++ execlen++;
++ }
++
++ log:
++ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
++ bprm->file->f_path.mnt, grarg);
++ up(&gr_exec_arg_sem);
++#endif
++ return;
++}
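Aside (not part of the patch): the EXECLOG loop above copies each argv string into a fixed 132-byte buffer, caps the concatenated arguments at 128 bytes, and rewrites unprintable characters to spaces before logging. A minimal user-space sketch of the same truncate-and-sanitize idea follows; the helper name and buffer size are illustrative only, not taken from the patch.

/* Sketch only: cap concatenated args at 128 bytes and replace
 * unprintable characters, mirroring the loop above. */
#include <stdio.h>
#include <string.h>

static void sanitize_args(char *dst, size_t cap, char *const argv[])
{
	size_t used = 0;

	memset(dst, 0, cap);
	for (int i = 0; argv[i] && used < cap - 1; i++) {
		size_t len = strnlen(argv[i], cap - 1 - used);

		memcpy(dst + used, argv[i], len);
		/* rewrite unprintable characters, as the patch does */
		for (size_t x = 0; x < len; x++)
			if (dst[used + x] < 32 || dst[used + x] > 126)
				dst[used + x] = ' ';
		used += len;
		if (used < cap - 1)
			dst[used++] = ' ';
	}
	dst[used] = '\0';
}

int main(int argc, char *argv[])
{
	char buf[129];

	(void)argc;
	sanitize_args(buf, sizeof(buf), argv);
	printf("%s\n", buf);
	return 0;
}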
+diff -urNp linux-2.6.31.7/grsecurity/grsec_fifo.c linux-2.6.31.7/grsecurity/grsec_fifo.c
+--- linux-2.6.31.7/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_fifo.c 2009-12-08 17:39:44.229805391 -0500
+@@ -0,0 +1,24 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
++ const struct dentry *dir, const int flag, const int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_FIFO
++ const struct cred *cred = current_cred();
++
++ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
++ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
++ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
++ (cred->fsuid != dentry->d_inode->i_uid)) {
++ if (!generic_permission(dentry->d_inode, acc_mode, NULL))
++ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
+diff -urNp linux-2.6.31.7/grsecurity/grsec_fork.c linux-2.6.31.7/grsecurity/grsec_fork.c
+--- linux-2.6.31.7/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_fork.c 2009-12-08 17:39:44.229805391 -0500
+@@ -0,0 +1,15 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/errno.h>
++
++void
++gr_log_forkfail(const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ if (grsec_enable_forkfail && retval != -ERESTARTNOINTR)
++ gr_log_int(GR_DONT_AUDIT, GR_FAILFORK_MSG, retval);
++#endif
++ return;
++}
+diff -urNp linux-2.6.31.7/grsecurity/grsec_init.c linux-2.6.31.7/grsecurity/grsec_init.c
+--- linux-2.6.31.7/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_init.c 2009-12-08 17:39:44.229805391 -0500
+@@ -0,0 +1,231 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp_lock.h>
++#include <linux/gracl.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/percpu.h>
++
++int grsec_enable_link;
++int grsec_enable_dmesg;
++int grsec_enable_harden_ptrace;
++int grsec_enable_fifo;
++int grsec_enable_execve;
++int grsec_enable_execlog;
++int grsec_enable_signal;
++int grsec_enable_forkfail;
++int grsec_enable_time;
++int grsec_enable_audit_textrel;
++int grsec_enable_group;
++int grsec_audit_gid;
++int grsec_enable_chdir;
++int grsec_enable_mount;
++int grsec_enable_rofs;
++int grsec_enable_chroot_findtask;
++int grsec_enable_chroot_mount;
++int grsec_enable_chroot_shmat;
++int grsec_enable_chroot_fchdir;
++int grsec_enable_chroot_double;
++int grsec_enable_chroot_pivot;
++int grsec_enable_chroot_chdir;
++int grsec_enable_chroot_chmod;
++int grsec_enable_chroot_mknod;
++int grsec_enable_chroot_nice;
++int grsec_enable_chroot_execlog;
++int grsec_enable_chroot_caps;
++int grsec_enable_chroot_sysctl;
++int grsec_enable_chroot_unix;
++int grsec_enable_tpe;
++int grsec_tpe_gid;
++int grsec_enable_tpe_all;
++int grsec_enable_socket_all;
++int grsec_socket_all_gid;
++int grsec_enable_socket_client;
++int grsec_socket_client_gid;
++int grsec_enable_socket_server;
++int grsec_socket_server_gid;
++int grsec_resource_logging;
++int grsec_lock;
++
++DEFINE_SPINLOCK(grsec_alert_lock);
++unsigned long grsec_alert_wtime = 0;
++unsigned long grsec_alert_fyet = 0;
++
++DEFINE_SPINLOCK(grsec_audit_lock);
++
++DEFINE_RWLOCK(grsec_exec_file_lock);
++
++char *gr_shared_page[4];
++
++char *gr_alert_log_fmt;
++char *gr_audit_log_fmt;
++char *gr_alert_log_buf;
++char *gr_audit_log_buf;
++
++extern struct gr_arg *gr_usermode;
++extern unsigned char *gr_system_salt;
++extern unsigned char *gr_system_sum;
++
++void __init
++grsecurity_init(void)
++{
++ int j;
++ /* create the per-cpu shared pages */
++
++#ifdef CONFIG_X86
++ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
++#endif
++
++ for (j = 0; j < 4; j++) {
++ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
++ if (gr_shared_page[j] == NULL) {
++ panic("Unable to allocate grsecurity shared page");
++ return;
++ }
++ }
++
++ /* allocate log buffers */
++ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
++ if (!gr_alert_log_fmt) {
++ panic("Unable to allocate grsecurity alert log format buffer");
++ return;
++ }
++ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
++ if (!gr_audit_log_fmt) {
++ panic("Unable to allocate grsecurity audit log format buffer");
++ return;
++ }
++ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++ if (!gr_alert_log_buf) {
++ panic("Unable to allocate grsecurity alert log buffer");
++ return;
++ }
++ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++ if (!gr_audit_log_buf) {
++ panic("Unable to allocate grsecurity audit log buffer");
++ return;
++ }
++
++ /* allocate memory for authentication structure */
++ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
++ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
++ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
++
++ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
++ panic("Unable to allocate grsecurity authentication structure");
++ return;
++ }
++
++#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++#ifndef CONFIG_GRKERNSEC_SYSCTL
++ grsec_lock = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ grsec_enable_audit_textrel = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++ grsec_enable_group = 1;
++ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ grsec_enable_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ grsec_enable_harden_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ grsec_enable_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++ grsec_enable_link = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++ grsec_enable_dmesg = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++ grsec_enable_fifo = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECVE
++ grsec_enable_execve = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ grsec_enable_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ grsec_enable_signal = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ grsec_enable_forkfail = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++ grsec_enable_time = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RESLOG
++ grsec_resource_logging = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ grsec_enable_chroot_findtask = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ grsec_enable_chroot_unix = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ grsec_enable_chroot_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ grsec_enable_chroot_fchdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ grsec_enable_chroot_shmat = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ grsec_enable_chroot_double = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ grsec_enable_chroot_pivot = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ grsec_enable_chroot_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ grsec_enable_chroot_chmod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ grsec_enable_chroot_mknod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ grsec_enable_chroot_nice = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ grsec_enable_chroot_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ grsec_enable_chroot_caps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ grsec_enable_chroot_sysctl = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++ grsec_enable_tpe = 1;
++ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ grsec_enable_tpe_all = 1;
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ grsec_enable_socket_all = 1;
++ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ grsec_enable_socket_client = 1;
++ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ grsec_enable_socket_server = 1;
++ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
++#endif
++#endif
++
++ return;
++}
+diff -urNp linux-2.6.31.7/grsecurity/grsec_link.c linux-2.6.31.7/grsecurity/grsec_link.c
+--- linux-2.6.31.7/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_link.c 2009-12-08 17:39:44.229805391 -0500
+@@ -0,0 +1,43 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_follow_link(const struct inode *parent,
++ const struct inode *inode,
++ const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++ const struct cred *cred = current_cred();
++
++ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
++ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
++ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
++ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_hardlink(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ struct inode *inode, const int mode, const char *to)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++ const struct cred *cred = current_cred();
++
++ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
++ (!S_ISREG(mode) || (mode & S_ISUID) ||
++ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
++ (generic_permission(inode, MAY_READ | MAY_WRITE, NULL))) &&
++ !capable(CAP_FOWNER) && cred->uid) {
++ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
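Aside (not part of the patch): gr_handle_follow_link() above refuses to follow a symlink that sits in a sticky, world-writable directory when the link is owned by neither the directory owner nor the caller; the same idea later reached mainline as the fs.protected_symlinks sysctl. A standalone restatement of that predicate over stat(2) data, with hypothetical names:

/* Sketch only: the symlink-following predicate, over plain stat(2) data. */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Deny following when the parent directory is sticky and world-writable
 * and the link is owned by neither the directory owner nor the caller. */
static int follow_denied(const struct stat *dir, const struct stat *lnk,
			 uid_t fsuid)
{
	return S_ISLNK(lnk->st_mode) &&
	       (dir->st_mode & S_ISVTX) &&
	       (dir->st_mode & S_IWOTH) &&
	       dir->st_uid != lnk->st_uid &&
	       fsuid != lnk->st_uid;
}

int main(void)
{
	struct stat dir = { .st_mode = S_IFDIR | 01777, .st_uid = 0 };
	struct stat lnk = { .st_mode = S_IFLNK | 0777, .st_uid = 1000 };

	printf("%d\n", follow_denied(&dir, &lnk, 0));    /* not the owner: 1 */
	printf("%d\n", follow_denied(&dir, &lnk, 1000)); /* link owner: 0 */
	return 0;
}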
+diff -urNp linux-2.6.31.7/grsecurity/grsec_log.c linux-2.6.31.7/grsecurity/grsec_log.c
+--- linux-2.6.31.7/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_log.c 2009-12-08 17:39:44.229805391 -0500
+@@ -0,0 +1,294 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/tty.h>
++#include <linux/fs.h>
++#include <linux/grinternal.h>
++
++#define BEGIN_LOCKS(x) \
++ read_lock(&tasklist_lock); \
++ read_lock(&grsec_exec_file_lock); \
++ if (x != GR_DO_AUDIT) \
++ spin_lock(&grsec_alert_lock); \
++ else \
++ spin_lock(&grsec_audit_lock)
++
++#define END_LOCKS(x) \
++ if (x != GR_DO_AUDIT) \
++ spin_unlock(&grsec_alert_lock); \
++ else \
++ spin_unlock(&grsec_audit_lock); \
++ read_unlock(&grsec_exec_file_lock); \
++ read_unlock(&tasklist_lock); \
++ if (x == GR_DONT_AUDIT) \
++ gr_handle_alertkill(current)
++
++enum {
++ FLOODING,
++ NO_FLOODING
++};
++
++extern char *gr_alert_log_fmt;
++extern char *gr_audit_log_fmt;
++extern char *gr_alert_log_buf;
++extern char *gr_audit_log_buf;
++
++static int gr_log_start(int audit)
++{
++ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
++ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++
++ if (audit == GR_DO_AUDIT)
++ goto set_fmt;
++
++ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
++ grsec_alert_wtime = jiffies;
++ grsec_alert_fyet = 0;
++ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
++ grsec_alert_fyet++;
++ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
++ grsec_alert_wtime = jiffies;
++ grsec_alert_fyet++;
++ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
++ return FLOODING;
++ } else return FLOODING;
++
++set_fmt:
++ memset(buf, 0, PAGE_SIZE);
++ if (current->signal->curr_ip && gr_acl_is_enabled()) {
++ sprintf(fmt, "%s%s", loglevel, "grsec: From %u.%u.%u.%u: (%.64s:%c:%.950s) ");
++ snprintf(buf, PAGE_SIZE - 1, fmt, NIPQUAD(current->signal->curr_ip), current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++ } else if (current->signal->curr_ip) {
++ sprintf(fmt, "%s%s", loglevel, "grsec: From %u.%u.%u.%u: ");
++ snprintf(buf, PAGE_SIZE - 1, fmt, NIPQUAD(current->signal->curr_ip));
++ } else if (gr_acl_is_enabled()) {
++ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
++ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++ } else {
++ sprintf(fmt, "%s%s", loglevel, "grsec: ");
++ strcpy(buf, fmt);
++ }
++
++ return NO_FLOODING;
++}
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++ __attribute__ ((format (printf, 2, 0)));
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++{
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++ unsigned int len = strlen(buf);
++
++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++
++ return;
++}
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++ __attribute__ ((format (printf, 2, 3)));
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++{
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++ unsigned int len = strlen(buf);
++ va_list ap;
++
++ va_start(ap, msg);
++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++ va_end(ap);
++
++ return;
++}
++
++static void gr_log_end(int audit)
++{
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++ unsigned int len = strlen(buf);
++
++ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->parent)));
++ printk("%s\n", buf);
++
++ return;
++}
++
++void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
++{
++ int logtype;
++ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
++ char *str1, *str2, *str3;
++ void *voidptr;
++ int num1, num2;
++ unsigned long ulong1, ulong2;
++ struct dentry *dentry;
++ struct vfsmount *mnt;
++ struct file *file;
++ struct task_struct *task;
++ const struct cred *cred, *pcred;
++ va_list ap;
++
++ BEGIN_LOCKS(audit);
++ logtype = gr_log_start(audit);
++ if (logtype == FLOODING) {
++ END_LOCKS(audit);
++ return;
++ }
++ va_start(ap, argtypes);
++ switch (argtypes) {
++ case GR_TTYSNIFF:
++ task = va_arg(ap, struct task_struct *);
++ gr_log_middle_varargs(audit, msg, NIPQUAD(task->signal->curr_ip), gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->parent->comm, task->parent->pid);
++ break;
++ case GR_SYSCTL_HIDDEN:
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, result, str1);
++ break;
++ case GR_RBAC:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
++ break;
++ case GR_RBAC_STR:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
++ break;
++ case GR_STR_RBAC:
++ str1 = va_arg(ap, char *);
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
++ break;
++ case GR_RBAC_MODE2:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ str1 = va_arg(ap, char *);
++ str2 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
++ break;
++ case GR_RBAC_MODE3:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ str1 = va_arg(ap, char *);
++ str2 = va_arg(ap, char *);
++ str3 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
++ break;
++ case GR_FILENAME:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
++ break;
++ case GR_STR_FILENAME:
++ str1 = va_arg(ap, char *);
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
++ break;
++ case GR_FILENAME_STR:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
++ break;
++ case GR_FILENAME_TWO_INT:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ num1 = va_arg(ap, int);
++ num2 = va_arg(ap, int);
++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
++ break;
++ case GR_FILENAME_TWO_INT_STR:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ num1 = va_arg(ap, int);
++ num2 = va_arg(ap, int);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
++ break;
++ case GR_TEXTREL:
++ file = va_arg(ap, struct file *);
++ ulong1 = va_arg(ap, unsigned long);
++ ulong2 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
++ break;
++ case GR_PTRACE:
++ task = va_arg(ap, struct task_struct *);
++ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
++ break;
++ case GR_RESOURCE:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->parent);
++ ulong1 = va_arg(ap, unsigned long);
++ str1 = va_arg(ap, char *);
++ ulong2 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ break;
++ case GR_CAP:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->parent);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ break;
++ case GR_SIG:
++ str1 = va_arg(ap, char *);
++ voidptr = va_arg(ap, void *);
++ gr_log_middle_varargs(audit, msg, str1, voidptr);
++ break;
++ case GR_SIG2:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->parent);
++ num1 = va_arg(ap, int);
++ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ break;
++ case GR_CRASH1:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->parent);
++ ulong1 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
++ break;
++ case GR_CRASH2:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->parent);
++ ulong1 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
++ break;
++ case GR_PSACCT:
++ {
++ unsigned int wday, cday;
++ __u8 whr, chr;
++ __u8 wmin, cmin;
++ __u8 wsec, csec;
++ char cur_tty[64] = { 0 };
++ char parent_tty[64] = { 0 };
++
++ task = va_arg(ap, struct task_struct *);
++ wday = va_arg(ap, unsigned int);
++ cday = va_arg(ap, unsigned int);
++ whr = va_arg(ap, int);
++ chr = va_arg(ap, int);
++ wmin = va_arg(ap, int);
++ cmin = va_arg(ap, int);
++ wsec = va_arg(ap, int);
++ csec = va_arg(ap, int);
++ ulong1 = va_arg(ap, unsigned long);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->parent);
++
++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, NIPQUAD(task->signal->curr_ip), tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, NIPQUAD(task->parent->signal->curr_ip), tty_name(task->parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ }
++ break;
++ default:
++ gr_log_middle(audit, msg, ap);
++ }
++ va_end(ap);
++ gr_log_end(audit);
++ END_LOCKS(audit);
++}
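Aside (not part of the patch): gr_log_start() above rate-limits alerts with a burst counter inside a fixed time window (CONFIG_GRKERNSEC_FLOODTIME seconds, CONFIG_GRKERNSEC_FLOODBURST messages) and announces suppression once. A standalone sketch of that window-plus-burst scheme, using wall-clock seconds instead of jiffies and made-up limits:

/* Sketch only: burst-within-window rate limiter in the style of
 * gr_log_start(); the limits here are invented. */
#include <stdio.h>
#include <time.h>

#define FLOOD_TIME  10	/* window length, seconds (hypothetical) */
#define FLOOD_BURST 4	/* messages allowed per window (hypothetical) */

static time_t window_start;
static unsigned int sent_in_window;

static int log_allowed(void)
{
	time_t now = time(NULL);

	if (!window_start || now - window_start > FLOOD_TIME) {
		window_start = now;		/* start a new window */
		sent_in_window = 0;
	} else if (sent_in_window < FLOOD_BURST) {
		sent_in_window++;		/* still under the burst limit */
	} else if (sent_in_window == FLOOD_BURST) {
		window_start = now;
		sent_in_window++;		/* announce suppression once */
		printf("more alerts, logging disabled for %d seconds\n",
		       FLOOD_TIME);
		return 0;
	} else {
		return 0;			/* flooding: drop silently */
	}
	return 1;
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		if (log_allowed())
			printf("alert %d\n", i);
	return 0;
}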
+diff -urNp linux-2.6.31.7/grsecurity/grsec_mem.c linux-2.6.31.7/grsecurity/grsec_mem.c
+--- linux-2.6.31.7/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_mem.c 2009-12-08 17:39:44.229805391 -0500
+@@ -0,0 +1,85 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/grinternal.h>
++
++void
++gr_handle_ioperm(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
++ return;
++}
++
++void
++gr_handle_iopl(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
++ return;
++}
++
++void
++gr_handle_mem_write(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_MEM_WRITE_MSG);
++ return;
++}
++
++void
++gr_handle_kmem_write(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_KMEM_MSG);
++ return;
++}
++
++void
++gr_handle_open_port(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_PORT_OPEN_MSG);
++ return;
++}
++
++int
++gr_handle_mem_mmap(const unsigned long offset, struct vm_area_struct *vma)
++{
++ unsigned long start, end;
++
++ start = offset;
++ end = start + vma->vm_end - vma->vm_start;
++
++ if (start > end) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG);
++ return -EPERM;
++ }
++
++ /* allowed ranges : ISA I/O BIOS */
++ if ((start >= __pa(high_memory))
++#if defined(CONFIG_X86) || defined(CONFIG_PPC)
++ || (start >= 0x000a0000 && end <= 0x00100000)
++ || (start >= 0x00000000 && end <= 0x00001000)
++#endif
++ )
++ return 0;
++
++ if (vma->vm_flags & VM_WRITE) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG);
++ return -EPERM;
++ } else
++ vma->vm_flags &= ~VM_MAYWRITE;
++
++ return 0;
++}
++
++void
++gr_log_nonroot_mod_load(const char *modname)
++{
++ gr_log_str(GR_DONT_AUDIT, GR_NONROOT_MODLOAD_MSG, modname);
++ return;
++}
++
++void
++gr_handle_vm86(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
++ return;
++}
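Aside (not part of the patch): gr_handle_mem_mmap() above permits /dev/mem mappings only above high_memory or inside the legacy ISA/BIOS and first-page windows, and strips write permission from anything else it lets through. The interval test restated as a standalone helper; the function name and the high_memory value used in main() are made up.

/* Sketch only: the allowed-range test, with the same legacy windows. */
#include <stdio.h>

static int range_allowed(unsigned long start, unsigned long end,
			 unsigned long high_memory_pa)
{
	if (start > end)				/* wrapped length */
		return 0;
	if (start >= high_memory_pa)			/* above RAM */
		return 1;
	if (start >= 0x000a0000UL && end <= 0x00100000UL) /* ISA/BIOS hole */
		return 1;
	if (end <= 0x00001000UL)			/* first page */
		return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", range_allowed(0x000b8000UL, 0x000c0000UL, 0x40000000UL));
	printf("%d\n", range_allowed(0x00100000UL, 0x00200000UL, 0x40000000UL));
	return 0;
}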
+diff -urNp linux-2.6.31.7/grsecurity/grsec_mount.c linux-2.6.31.7/grsecurity/grsec_mount.c
+--- linux-2.6.31.7/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_mount.c 2009-12-08 17:39:44.230810694 -0500
+@@ -0,0 +1,62 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mount.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_remount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
++#endif
++ return;
++}
++
++void
++gr_log_unmount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
++#endif
++ return;
++}
++
++void
++gr_log_mount(const char *from, const char *to, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from, to);
++#endif
++ return;
++}
++
++int
++gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
++{
++#ifdef CONFIG_GRKERNSEC_ROFS
++ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
++ return -EPERM;
++ } else
++ return 0;
++#endif
++ return 0;
++}
++
++int
++gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_ROFS
++ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
++ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
++ return -EPERM;
++ } else
++ return 0;
++#endif
++ return 0;
++}
+diff -urNp linux-2.6.31.7/grsecurity/grsec_sig.c linux-2.6.31.7/grsecurity/grsec_sig.c
+--- linux-2.6.31.7/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_sig.c 2009-12-08 17:39:44.230810694 -0500
+@@ -0,0 +1,65 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++char *signames[] = {
++ [SIGSEGV] = "Segmentation fault",
++ [SIGILL] = "Illegal instruction",
++ [SIGABRT] = "Abort",
++ [SIGBUS] = "Invalid alignment/Bus error"
++};
++
++void
++gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
++{
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
++ (sig == SIGABRT) || (sig == SIGBUS))) {
++ if (t->pid == current->pid) {
++ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
++ } else {
++ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
++ }
++ }
++#endif
++ return;
++}
++
++int
++gr_handle_signal(const struct task_struct *p, const int sig)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (current->pid > 1 && gr_check_protected_task(p)) {
++ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
++ return -EPERM;
++ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++void gr_handle_brute_attach(struct task_struct *p)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ if (p->parent && p->parent->exec_file == p->exec_file)
++ p->parent->brute = 1;
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++#endif
++ return;
++}
++
++void gr_handle_brute_check(void)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ if (current->brute)
++ msleep(30 * 1000);
++#endif
++ return;
++}
++
+diff -urNp linux-2.6.31.7/grsecurity/grsec_sock.c linux-2.6.31.7/grsecurity/grsec_sock.c
+--- linux-2.6.31.7/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_sock.c 2009-12-08 17:39:44.230810694 -0500
+@@ -0,0 +1,269 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <net/sock.h>
++#include <net/inet_sock.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
++EXPORT_SYMBOL(gr_cap_rtnetlink);
++
++extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
++
++EXPORT_SYMBOL(gr_search_udp_recvmsg);
++EXPORT_SYMBOL(gr_search_udp_sendmsg);
++
++#ifdef CONFIG_UNIX_MODULE
++EXPORT_SYMBOL(gr_acl_handle_unix);
++EXPORT_SYMBOL(gr_acl_handle_mknod);
++EXPORT_SYMBOL(gr_handle_chroot_unix);
++EXPORT_SYMBOL(gr_handle_create);
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++#define gr_conn_table_size 32749
++struct conn_table_entry {
++ struct conn_table_entry *next;
++ struct signal_struct *sig;
++};
++
++struct conn_table_entry *gr_conn_table[gr_conn_table_size];
++DEFINE_SPINLOCK(gr_conn_table_lock);
++
++extern const char * gr_socktype_to_name(unsigned char type);
++extern const char * gr_proto_to_name(unsigned char proto);
++
++static __inline__ int
++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
++{
++ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
++}
++
++static __inline__ int
++conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
++ __u16 sport, __u16 dport)
++{
++ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
++ sig->gr_sport == sport && sig->gr_dport == dport))
++ return 1;
++ else
++ return 0;
++}
++
++static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
++{
++ struct conn_table_entry **match;
++ unsigned int index;
++
++ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
++ sig->gr_sport, sig->gr_dport,
++ gr_conn_table_size);
++
++ newent->sig = sig;
++
++ match = &gr_conn_table[index];
++ newent->next = *match;
++ *match = newent;
++
++ return;
++}
++
++static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
++{
++ struct conn_table_entry *match, *last = NULL;
++ unsigned int index;
++
++ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
++ sig->gr_sport, sig->gr_dport,
++ gr_conn_table_size);
++
++ match = gr_conn_table[index];
++ while (match && !conn_match(match->sig,
++ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
++ sig->gr_dport)) {
++ last = match;
++ match = match->next;
++ }
++
++ if (match) {
++ if (last)
++ last->next = match->next;
++ else
++ gr_conn_table[index] = NULL;
++ kfree(match);
++ }
++
++ return;
++}
++
++static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
++ __u16 sport, __u16 dport)
++{
++ struct conn_table_entry *match;
++ unsigned int index;
++
++ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
++
++ match = gr_conn_table[index];
++ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
++ match = match->next;
++
++ if (match)
++ return match->sig;
++ else
++ return NULL;
++}
++
++#endif
++
++void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct signal_struct *sig = task->signal;
++ struct conn_table_entry *newent;
++
++ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
++ if (newent == NULL)
++ return;
++ /* no bh lock needed since we are called with bh disabled */
++ spin_lock(&gr_conn_table_lock);
++ gr_del_task_from_ip_table_nolock(sig);
++ sig->gr_saddr = inet->rcv_saddr;
++ sig->gr_daddr = inet->daddr;
++ sig->gr_sport = inet->sport;
++ sig->gr_dport = inet->dport;
++ gr_add_to_task_ip_table_nolock(sig, newent);
++ spin_unlock(&gr_conn_table_lock);
++#endif
++ return;
++}
++
++void gr_del_task_from_ip_table(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++ spin_lock_bh(&gr_conn_table_lock);
++ gr_del_task_from_ip_table_nolock(task->signal);
++ spin_unlock_bh(&gr_conn_table_lock);
++#endif
++ return;
++}
++
++void
++gr_attach_curr_ip(const struct sock *sk)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct signal_struct *p, *set;
++ const struct inet_sock *inet = inet_sk(sk);
++
++ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
++ return;
++
++ set = current->signal;
++
++ spin_lock_bh(&gr_conn_table_lock);
++ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
++ inet->dport, inet->sport);
++ if (unlikely(p != NULL)) {
++ set->curr_ip = p->curr_ip;
++ set->used_accept = 1;
++ gr_del_task_from_ip_table_nolock(p);
++ spin_unlock_bh(&gr_conn_table_lock);
++ return;
++ }
++ spin_unlock_bh(&gr_conn_table_lock);
++
++ set->curr_ip = inet->daddr;
++ set->used_accept = 1;
++#endif
++ return;
++}
++
++int
++gr_handle_sock_all(const int family, const int type, const int protocol)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
++ (family != AF_UNIX) && (family != AF_LOCAL)) {
++ gr_log_int_str2(GR_DONT_AUDIT, GR_SOCK2_MSG, family, gr_socktype_to_name(type), gr_proto_to_name(protocol));
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_server(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ if (grsec_enable_socket_server &&
++ in_group_p(grsec_socket_server_gid) &&
++ sck && (sck->sa_family != AF_UNIX) &&
++ (sck->sa_family != AF_LOCAL)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_server_other(const struct sock *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ if (grsec_enable_socket_server &&
++ in_group_p(grsec_socket_server_gid) &&
++ sck && (sck->sk_family != AF_UNIX) &&
++ (sck->sk_family != AF_LOCAL)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_client(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
++ sck && (sck->sa_family != AF_UNIX) &&
++ (sck->sa_family != AF_LOCAL)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++kernel_cap_t
++gr_cap_rtnetlink(struct sock *sock)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (!gr_acl_is_enabled())
++ return current_cap();
++ else if (sock->sk_protocol == NETLINK_ISCSI &&
++ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
++ gr_is_capable(CAP_SYS_ADMIN))
++ return current_cap();
++ else if (sock->sk_protocol == NETLINK_AUDIT &&
++ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
++ gr_is_capable(CAP_AUDIT_WRITE) &&
++ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
++ gr_is_capable(CAP_AUDIT_CONTROL))
++ return current_cap();
++ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
++ gr_is_capable(CAP_NET_ADMIN))
++ return current_cap();
++ else
++ return __cap_empty_set;
++#else
++ return current_cap();
++#endif
++}
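Aside (not part of the patch): the connection table above hashes the (saddr, daddr, sport, dport) tuple into a fixed-size array of singly linked chains so that gr_attach_curr_ip() can recover the peer task on accept(). A standalone sketch of the same open-hashing pattern, with made-up types and a much smaller table:

/* Sketch only: chained hash table keyed on a 4-tuple, in the style of
 * gr_conn_table; types and sizes are invented. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TABLE_SIZE 257	/* hypothetical; the patch uses 32749 */

struct conn {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	struct conn *next;
};

static struct conn *table[TABLE_SIZE];

static unsigned int conn_hash(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	return (d + s + ((uint32_t)sp << 8) + ((uint32_t)dp << 16)) % TABLE_SIZE;
}

static void conn_add(struct conn *c)
{
	unsigned int i = conn_hash(c->saddr, c->daddr, c->sport, c->dport);

	c->next = table[i];		/* push onto the chain head */
	table[i] = c;
}

static struct conn *conn_find(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	struct conn *c = table[conn_hash(s, d, sp, dp)];

	while (c && !(c->saddr == s && c->daddr == d &&
		      c->sport == sp && c->dport == dp))
		c = c->next;
	return c;
}

int main(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	c->saddr = 0x0a000001; c->daddr = 0x0a000002;
	c->sport = 12345; c->dport = 80;
	conn_add(c);
	printf("found: %p\n", (void *)conn_find(0x0a000001, 0x0a000002, 12345, 80));
	free(c);
	return 0;
}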
+diff -urNp linux-2.6.31.7/grsecurity/grsec_sysctl.c linux-2.6.31.7/grsecurity/grsec_sysctl.c
+--- linux-2.6.31.7/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_sysctl.c 2009-12-08 17:39:44.230810694 -0500
+@@ -0,0 +1,419 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/sysctl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
++{
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
++ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++#ifdef CONFIG_GRKERNSEC_ROFS
++static int __maybe_unused one = 1;
++#endif
++
++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
++ctl_table grsecurity_table[] = {
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++#ifdef CONFIG_GRKERNSEC_LINK
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "linking_restrictions",
++ .data = &grsec_enable_link,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "fifo_restrictions",
++ .data = &grsec_enable_fifo,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECVE
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "execve_limiting",
++ .data = &grsec_enable_execve,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "exec_logging",
++ .data = &grsec_enable_execlog,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "signal_logging",
++ .data = &grsec_enable_signal,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "forkfail_logging",
++ .data = &grsec_enable_forkfail,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "timechange_logging",
++ .data = &grsec_enable_time,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_deny_shmat",
++ .data = &grsec_enable_chroot_shmat,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_deny_unix",
++ .data = &grsec_enable_chroot_unix,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_deny_mount",
++ .data = &grsec_enable_chroot_mount,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_deny_fchdir",
++ .data = &grsec_enable_chroot_fchdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_deny_chroot",
++ .data = &grsec_enable_chroot_double,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_deny_pivot",
++ .data = &grsec_enable_chroot_pivot,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_enforce_chdir",
++ .data = &grsec_enable_chroot_chdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_deny_chmod",
++ .data = &grsec_enable_chroot_chmod,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_deny_mknod",
++ .data = &grsec_enable_chroot_mknod,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_restrict_nice",
++ .data = &grsec_enable_chroot_nice,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_execlog",
++ .data = &grsec_enable_chroot_execlog,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_caps",
++ .data = &grsec_enable_chroot_caps,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_deny_sysctl",
++ .data = &grsec_enable_chroot_sysctl,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "tpe",
++ .data = &grsec_enable_tpe,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "tpe_gid",
++ .data = &grsec_tpe_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "tpe_restrict_all",
++ .data = &grsec_enable_tpe_all,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "socket_all",
++ .data = &grsec_enable_socket_all,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "socket_all_gid",
++ .data = &grsec_socket_all_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "socket_client",
++ .data = &grsec_enable_socket_client,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "socket_client_gid",
++ .data = &grsec_socket_client_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "socket_server",
++ .data = &grsec_enable_socket_server,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "socket_server_gid",
++ .data = &grsec_socket_server_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "audit_group",
++ .data = &grsec_enable_group,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "audit_gid",
++ .data = &grsec_audit_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "audit_chdir",
++ .data = &grsec_enable_chdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "audit_mount",
++ .data = &grsec_enable_mount,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "audit_textrel",
++ .data = &grsec_enable_audit_textrel,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "dmesg",
++ .data = &grsec_enable_dmesg,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "chroot_findtask",
++ .data = &grsec_enable_chroot_findtask,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RESLOG
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "resource_logging",
++ .data = &grsec_resource_logging,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "harden_ptrace",
++ .data = &grsec_enable_harden_ptrace,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "grsec_lock",
++ .data = &grsec_lock,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_ROFS
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "romount_protect",
++ .data = &grsec_enable_rofs,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &one,
++ },
++#endif
++ { .ctl_name = 0 }
++};
++#endif
+diff -urNp linux-2.6.31.7/grsecurity/grsec_textrel.c linux-2.6.31.7/grsecurity/grsec_textrel.c
+--- linux-2.6.31.7/grsecurity/grsec_textrel.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_textrel.c 2009-12-08 17:39:44.230810694 -0500
+@@ -0,0 +1,16 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++#include <linux/grsecurity.h>
++
++void
++gr_log_textrel(struct vm_area_struct * vma)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ if (grsec_enable_audit_textrel)
++ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
++#endif
++ return;
++}
+diff -urNp linux-2.6.31.7/grsecurity/grsec_time.c linux-2.6.31.7/grsecurity/grsec_time.c
+--- linux-2.6.31.7/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_time.c 2009-12-08 17:39:44.230810694 -0500
+@@ -0,0 +1,13 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_timechange(void)
++{
++#ifdef CONFIG_GRKERNSEC_TIME
++ if (grsec_enable_time)
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
++#endif
++ return;
++}
+diff -urNp linux-2.6.31.7/grsecurity/grsec_tpe.c linux-2.6.31.7/grsecurity/grsec_tpe.c
+--- linux-2.6.31.7/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsec_tpe.c 2009-12-08 17:39:44.231808097 -0500
+@@ -0,0 +1,38 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/grinternal.h>
++
++extern int gr_acl_tpe_check(void);
++
++int
++gr_tpe_allow(const struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
++ const struct cred *cred = current_cred();
++
++ if (cred->uid && ((grsec_enable_tpe &&
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ !in_group_p(grsec_tpe_gid)
++#else
++ in_group_p(grsec_tpe_gid)
++#endif
++ ) || gr_acl_tpe_check()) &&
++ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
++ (inode->i_mode & S_IWOTH))))) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 0;
++ }
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
++ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
++ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 0;
++ }
++#endif
++#endif
++ return 1;
++}
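Aside (not part of the patch): the TPE check above ultimately asks one question about the directory containing the binary: is it owned by root and neither group- nor world-writable? A standalone restatement of that trust test over stat(2) fields; the helper name is hypothetical.

/* Sketch only: the directory-trust test at the heart of TPE. */
#include <stdio.h>
#include <sys/stat.h>

/* Trusted iff root-owned and not group- or world-writable, mirroring the
 * i_uid / S_IWGRP / S_IWOTH test in gr_tpe_allow() above. */
static int tpe_dir_trusted(const struct stat *dir)
{
	if (dir->st_uid != 0)
		return 0;
	if (dir->st_mode & (S_IWGRP | S_IWOTH))
		return 0;
	return 1;
}

int main(int argc, char *argv[])
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) != 0) {
		fprintf(stderr, "usage: %s <directory>\n", argv[0]);
		return 1;
	}
	printf("%s is %strusted for TPE purposes\n",
	       argv[1], tpe_dir_trusted(&st) ? "" : "NOT ");
	return 0;
}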
+diff -urNp linux-2.6.31.7/grsecurity/grsum.c linux-2.6.31.7/grsecurity/grsum.c
+--- linux-2.6.31.7/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/grsum.c 2009-12-08 17:39:44.231808097 -0500
+@@ -0,0 +1,59 @@
++#include <linux/err.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/scatterlist.h>
++#include <linux/crypto.h>
++#include <linux/gracl.h>
++
++
++#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
++#error "crypto and sha256 must be built into the kernel"
++#endif
++
++int
++chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
++{
++ char *p;
++ struct crypto_hash *tfm;
++ struct hash_desc desc;
++ struct scatterlist sg;
++ unsigned char temp_sum[GR_SHA_LEN];
++ volatile int retval = 0;
++ volatile int dummy = 0;
++ unsigned int i;
++
++ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
++ if (IS_ERR(tfm)) {
++ /* should never happen, since sha256 should be built in */
++ return 1;
++ }
++
++ desc.tfm = tfm;
++ desc.flags = 0;
++
++ crypto_hash_init(&desc);
++
++ p = salt;
++ sg_set_buf(&sg, p, GR_SALT_LEN);
++ crypto_hash_update(&desc, &sg, sg.length);
++
++ p = entry->pw;
++ sg_set_buf(&sg, p, strlen(p));
++
++ crypto_hash_update(&desc, &sg, sg.length);
++
++ crypto_hash_final(&desc, temp_sum);
++
++ memset(entry->pw, 0, GR_PW_LEN);
++
++ for (i = 0; i < GR_SHA_LEN; i++)
++ if (sum[i] != temp_sum[i])
++ retval = 1;
++ else
++ dummy = 1; // waste a cycle
++
++ crypto_free_hash(tfm);
++
++ return retval;
++}
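Aside (not part of the patch): chkpw() above compares the computed SHA-256 digest against the stored sum byte by byte without returning early, so a mismatch in the first byte costs about as much time as one in the last. The sketch below shows the common XOR-accumulate variant of the same no-early-exit comparison; it is not the patch's exact flag-and-dummy loop.

/* Sketch only: compare two digests without an early exit. */
#include <stdio.h>
#include <stddef.h>

static int digests_differ(const unsigned char *a, const unsigned char *b,
			  size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];	/* accumulate differences, never break */
	return diff != 0;
}

int main(void)
{
	unsigned char x[4] = { 1, 2, 3, 4 };
	unsigned char y[4] = { 1, 2, 3, 5 };

	printf("%d\n", digests_differ(x, y, sizeof(x)));	/* prints 1 */
	printf("%d\n", digests_differ(x, x, sizeof(x)));	/* prints 0 */
	return 0;
}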
+diff -urNp linux-2.6.31.7/grsecurity/Kconfig linux-2.6.31.7/grsecurity/Kconfig
+--- linux-2.6.31.7/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/Kconfig 2009-12-08 17:39:44.224728422 -0500
+@@ -0,0 +1,937 @@
++#
++# grsecurity configuration
++#
++
++menu "Grsecurity"
++
++config GRKERNSEC
++ bool "Grsecurity"
++ select CRYPTO
++ select CRYPTO_SHA256
++ help
++ If you say Y here, you will be able to configure many features
++ that will enhance the security of your system. It is highly
++ recommended that you say Y here and read through the help
++ for each option so that you fully understand the features and
++ can evaluate their usefulness for your machine.
++
++choice
++ prompt "Security Level"
++ depends on GRKERNSEC
++ default GRKERNSEC_CUSTOM
++
++config GRKERNSEC_LOW
++ bool "Low"
++ select GRKERNSEC_LINK
++ select GRKERNSEC_FIFO
++ select GRKERNSEC_EXECVE
++ select GRKERNSEC_RANDNET
++ select GRKERNSEC_DMESG
++ select GRKERNSEC_CHROOT
++ select GRKERNSEC_CHROOT_CHDIR
++
++ help
++ If you choose this option, several of the grsecurity options will
++ be enabled that will give you greater protection against a number
++ of attacks, while assuring that none of your software will have any
++ conflicts with the additional security measures. If you run a lot
++ of unusual software, or you are having problems with the higher
++ security levels, you should say Y here. With this option, the
++ following features are enabled:
++
++ - Linking restrictions
++ - FIFO restrictions
++ - Enforcing RLIMIT_NPROC on execve
++ - Restricted dmesg
++ - Enforced chdir("/") on chroot
++ - Runtime module disabling
++
++config GRKERNSEC_MEDIUM
++ bool "Medium"
++ select PAX
++ select PAX_EI_PAX
++ select PAX_PT_PAX_FLAGS
++ select PAX_HAVE_ACL_FLAGS
++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
++ select GRKERNSEC_CHROOT
++ select GRKERNSEC_CHROOT_SYSCTL
++ select GRKERNSEC_LINK
++ select GRKERNSEC_FIFO
++ select GRKERNSEC_EXECVE
++ select GRKERNSEC_DMESG
++ select GRKERNSEC_RANDNET
++ select GRKERNSEC_FORKFAIL
++ select GRKERNSEC_TIME
++ select GRKERNSEC_SIGNAL
++ select GRKERNSEC_CHROOT
++ select GRKERNSEC_CHROOT_UNIX
++ select GRKERNSEC_CHROOT_MOUNT
++ select GRKERNSEC_CHROOT_PIVOT
++ select GRKERNSEC_CHROOT_DOUBLE
++ select GRKERNSEC_CHROOT_CHDIR
++ select GRKERNSEC_CHROOT_MKNOD
++ select GRKERNSEC_PROC
++ select GRKERNSEC_PROC_USERGROUP
++ select PAX_RANDUSTACK
++ select PAX_ASLR
++ select PAX_RANDMMAP
++ select PAX_REFCOUNT if (X86 || SPARC64)
++ select PAX_USERCOPY if ((X86 || SPARC32 || SPARC64 || PPC32 || PPC64) && (SLAB || SLUB || SLOB))
++
++ help
++ If you say Y here, several features in addition to those included
++ in the low additional security level will be enabled. These
++ features provide even more security to your system, though in rare
++ cases they may be incompatible with very old or poorly written
++ software. If you enable this option, make sure that your auth
++ service (identd) is running as gid 1001. With this option,
++ the following features (in addition to those provided in the
++ low additional security level) will be enabled:
++
++ - Failed fork logging
++ - Time change logging
++ - Signal logging
++ - Deny mounts in chroot
++ - Deny double chrooting
++ - Deny sysctl writes in chroot
++ - Deny mknod in chroot
++ - Deny access to abstract AF_UNIX sockets out of chroot
++ - Deny pivot_root in chroot
++ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
++ - /proc restrictions with special GID set to 10 (usually wheel)
++ - Address Space Layout Randomization (ASLR)
++ - Prevent exploitation of most refcount overflows
++ - Bounds checking of copying between the kernel and userland
++
++config GRKERNSEC_HIGH
++ bool "High"
++ select GRKERNSEC_LINK
++ select GRKERNSEC_FIFO
++ select GRKERNSEC_EXECVE
++ select GRKERNSEC_DMESG
++ select GRKERNSEC_FORKFAIL
++ select GRKERNSEC_TIME
++ select GRKERNSEC_SIGNAL
++ select GRKERNSEC_CHROOT
++ select GRKERNSEC_CHROOT_SHMAT
++ select GRKERNSEC_CHROOT_UNIX
++ select GRKERNSEC_CHROOT_MOUNT
++ select GRKERNSEC_CHROOT_FCHDIR
++ select GRKERNSEC_CHROOT_PIVOT
++ select GRKERNSEC_CHROOT_DOUBLE
++ select GRKERNSEC_CHROOT_CHDIR
++ select GRKERNSEC_CHROOT_MKNOD
++ select GRKERNSEC_CHROOT_CAPS
++ select GRKERNSEC_CHROOT_SYSCTL
++ select GRKERNSEC_CHROOT_FINDTASK
++ select GRKERNSEC_PROC
++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
++ select GRKERNSEC_HIDESYM
++ select GRKERNSEC_BRUTE
++ select GRKERNSEC_PROC_USERGROUP
++ select GRKERNSEC_KMEM
++ select GRKERNSEC_RESLOG
++ select GRKERNSEC_RANDNET
++ select GRKERNSEC_PROC_ADD
++ select GRKERNSEC_CHROOT_CHMOD
++ select GRKERNSEC_CHROOT_NICE
++ select GRKERNSEC_AUDIT_MOUNT
++ select GRKERNSEC_MODHARDEN if (MODULES)
++ select GRKERNSEC_HARDEN_PTRACE
++ select GRKERNSEC_VM86 if (X86_32)
++ select PAX
++ select PAX_RANDUSTACK
++ select PAX_ASLR
++ select PAX_RANDMMAP
++ select PAX_NOEXEC
++ select PAX_MPROTECT
++ select PAX_EI_PAX
++ select PAX_PT_PAX_FLAGS
++ select PAX_HAVE_ACL_FLAGS
++ select PAX_KERNEXEC if (X86 && (!X86_32 || X86_WP_WORKS_OK))
++ select PAX_MEMORY_UDEREF if (X86_32)
++ select PAX_RANDKSTACK if (X86_TSC && !X86_64)
++ select PAX_SEGMEXEC if (X86_32)
++ select PAX_PAGEEXEC
++ select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
++ select PAX_EMUTRAMP if (PARISC)
++ select PAX_EMUSIGRT if (PARISC)
++ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
++ select PAX_REFCOUNT if (X86 || SPARC64)
++ select PAX_USERCOPY if ((X86 || PPC32 || PPC64 || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
++ help
++ If you say Y here, many of the features of grsecurity will be
++ enabled, which will protect you against many kinds of attacks
++ against your system. The heightened security comes at a cost
++ of an increased chance of incompatibilities with rare software
++ on your machine. Since this security level enables PaX, you should
++ view <http://pax.grsecurity.net> and read about the PaX
++ project. While you are there, download chpax and run it on
++ binaries that cause problems with PaX. Also remember that
++ since the /proc restrictions are enabled, you must run your
++ identd as gid 1001. This security level enables the following
++ features in addition to those listed in the low and medium
++ security levels:
++
++ - Additional /proc restrictions
++ - Chmod restrictions in chroot
++ - No signals, ptrace, or viewing of processes outside of chroot
++ - Capability restrictions in chroot
++ - Deny fchdir out of chroot
++ - Priority restrictions in chroot
++ - Segmentation-based implementation of PaX
++ - Mprotect restrictions
++ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
++ - Kernel stack randomization
++ - Mount/unmount/remount logging
++ - Kernel symbol hiding
++ - Prevention of memory exhaustion-based exploits
++ - Hardening of module auto-loading
++ - Ptrace restrictions
++ - Restricted vm86 mode
++
++config GRKERNSEC_CUSTOM
++ bool "Custom"
++ help
++ If you say Y here, you will be able to configure every grsecurity
++ option, which allows you to enable many more features that aren't
++ covered in the basic security levels. These additional features
++ include TPE, socket restrictions, and the sysctl system for
++ grsecurity. It is advised that you read through the help for
++ each option to determine its usefulness in your situation.
++
++endchoice
++
++menu "Address Space Protection"
++depends on GRKERNSEC
++
++config GRKERNSEC_KMEM
++ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
++ help
++ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
++ be written to via mmap or otherwise to modify the running kernel.
++ /dev/port will also not be allowed to be opened. If you have module
++ support disabled, enabling this will close up four ways that are
++ currently used to insert malicious code into the running kernel.
++ Even with all these features enabled, we still highly recommend that
++ you use the RBAC system, as it is still possible for an attacker to
++ modify the running kernel through privileged I/O granted by ioperm/iopl.
++ If you are not using XFree86, you may be able to stop this additional
++ case by enabling the 'Disable privileged I/O' option. Though nothing
++ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
++ but only to video memory, which is the only writing we allow in this
++ case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
++ will not be allowed to be mprotect()ed with PROT_WRITE later.
++ It is highly recommended that you say Y here if you meet all the
++ conditions above.
++
++config GRKERNSEC_VM86
++ bool "Restrict VM86 mode"
++ depends on X86_32
++
++ help
++ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
++ make use of a special execution mode on 32bit x86 processors called
++ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
++ video cards and will still work with this option enabled. The purpose
++ of the option is to prevent exploitation of emulation errors in
++ virtualization of vm86 mode, like the one discovered in VMware in 2009.
++ Nearly all users should be able to enable this option.
++
++config GRKERNSEC_IO
++ bool "Disable privileged I/O"
++ depends on X86
++ select RTC_CLASS
++ select RTC_INTF_DEV
++ select RTC_DRV_CMOS
++
++ help
++ If you say Y here, all ioperm and iopl calls will return an error.
++ Ioperm and iopl can be used to modify the running kernel.
++ Unfortunately, some programs need this access to operate properly,
++ the most notable of which are XFree86 and hwclock. The hwclock case can be
++ remedied by having RTC support in the kernel, so real-time
++ clock support is enabled if this option is enabled, to ensure
++ that hwclock operates correctly. XFree86 still will not
++ operate correctly with this option enabled, so DO NOT CHOOSE Y
++ IF YOU USE XFree86. If you use XFree86 and you still want to
++ protect your kernel against modification, use the RBAC system.
++
++config GRKERNSEC_PROC_MEMMAP
++ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
++ default y if (PAX_NOEXEC || PAX_ASLR)
++ depends on PAX_NOEXEC || PAX_ASLR
++ help
++ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
++ give no information about the addresses of a task's mappings if
++ PaX features that rely on random addresses are enabled for that task.
++ If you use PaX, it is strongly recommended that you say Y here, as it
++ closes a hole that would otherwise make full ASLR useless for suid
++ binaries.
++
++config GRKERNSEC_BRUTE
++ bool "Deter exploit bruteforcing"
++ help
++ If you say Y here, attempts to bruteforce exploits against forking
++ daemons such as apache or sshd will be deterred. When a child of a
++ forking daemon is killed by PaX or crashes due to an illegal
++ instruction, the parent process will be delayed 30 seconds upon every
++ subsequent fork until the administrator is able to assess the
++ situation and restart the daemon. It is recommended that you also
++ enable signal logging in the auditing section so that logs are
++ generated when a process performs an illegal instruction.
++
++config GRKERNSEC_MODHARDEN
++ bool "Harden module auto-loading"
++ depends on MODULES
++ help
++ If you say Y here, module auto-loading in response to use of some
++ feature implemented by an unloaded module will be restricted to
++ root users. Enabling this option helps defend against attacks
++ by unprivileged users who abuse the auto-loading behavior to
++ cause a vulnerable module to load that is then exploited.
++
++ If this option prevents a legitimate use of auto-loading for a
++ non-root user, the administrator can execute modprobe manually
++ with the exact name of the module mentioned in the alert log.
++ Alternatively, the administrator can add the module to the list
++ of modules loaded at boot by modifying init scripts.
++
++ Modification of init scripts will most likely be needed on
++ Ubuntu servers with encrypted home directory support enabled,
++ as the first non-root user logging in will cause the ecb(aes),
++ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
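++
++ For example, if the alert log shows that auto-loading of a module was
++ denied for an unprivileged user, root can load that module by hand
++ (the module name below is purely illustrative):
++
++     modprobe loop    # substitute the exact name from the alert log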
++
++config GRKERNSEC_HIDESYM
++ bool "Hide kernel symbols"
++ help
++ If you say Y here, getting information on loaded modules and
++ displaying all kernel symbols through a syscall will be restricted
++ to users with CAP_SYS_MODULE. For software compatibility reasons,
++ /proc/kallsyms will be restricted to the root user. The RBAC
++ system can hide that entry even from root. Note that this option
++ is only effective provided the following conditions are met:
++ 1) The kernel using grsecurity is not precompiled by some distribution
++ 2) You are using the RBAC system and hiding other files such as your
++ kernel image and System.map. Alternatively, enabling this option
++ causes the permissions on /boot, /lib/modules, and the kernel
++ source directory to change at compile time to prevent
++ reading by non-root users.
++ If the above conditions are met, this option will aid in providing a
++ useful protection against local kernel exploitation of overflows
++ and arbitrary read/write vulnerabilities.
++
++endmenu
++menu "Role Based Access Control Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_NO_RBAC
++ bool "Disable RBAC system"
++ help
++ If you say Y here, the /dev/grsec device will be removed from the kernel,
++ preventing the RBAC system from being enabled. You should only say Y
++ here if you have no intention of using the RBAC system, so as to prevent
++ an attacker with root access from misusing the RBAC system to hide files
++ and processes when loadable module support and /dev/[k]mem have been
++ locked down.
++
++config GRKERNSEC_ACL_HIDEKERN
++ bool "Hide kernel processes"
++ help
++ If you say Y here, all kernel threads will be hidden from all
++ processes except those whose subject has the "view hidden processes"
++ flag.
++
++config GRKERNSEC_ACL_MAXTRIES
++ int "Maximum tries before password lockout"
++ default 3
++ help
++ This option enforces the maximum number of times a user can attempt
++ to authorize themselves with the grsecurity RBAC system before being
++ denied the ability to attempt authorization again for a specified time.
++ The lower the number, the harder it will be to brute-force a password.
++
++config GRKERNSEC_ACL_TIMEOUT
++ int "Time to wait after max password tries, in seconds"
++ default 30
++ help
++ This option specifies the time the user must wait after attempting to
++ authorize to the RBAC system with the maximum number of invalid
++ passwords. The higher the number, the harder it will be to brute-force
++ a password.
++
++endmenu
++menu "Filesystem Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_PROC
++ bool "Proc restrictions"
++ help
++ If you say Y here, the permissions of the /proc filesystem
++ will be altered to enhance system security and privacy. You MUST
++ choose either a user-only restriction or a user and group restriction.
++ With the user-only restriction, non-root users can see only the
++ processes they themselves run; with the group restriction, you also
++ choose a group that can view all processes and files normally
++ restricted to root. NOTE: If you're running identd as
++ a non-root user, you will have to run it as the group you specify here.
++
++config GRKERNSEC_PROC_USER
++ bool "Restrict /proc to user only"
++ depends on GRKERNSEC_PROC
++ help
++ If you say Y here, non-root users will only be able to view their own
++ processes, and will be restricted from viewing network-related
++ information and kernel symbol and module information.
++
++config GRKERNSEC_PROC_USERGROUP
++ bool "Allow special group"
++ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
++ help
++ If you say Y here, you will be able to select a group whose members
++ will be able to view all processes, network-related information, and
++ kernel symbol and module information. This option is useful if you want
++ to run identd as a non-root user.
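++
++ For example, with the default GID of 1001 from the option below, the
++ group could be created and the identd account added to it roughly as
++ follows (the group and account names are only placeholders):
++
++     groupadd -g 1001 procview
++     usermod -aG procview identd    # identd can now view all of /proc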
++
++config GRKERNSEC_PROC_GID
++ int "GID for special group"
++ depends on GRKERNSEC_PROC_USERGROUP
++ default 1001
++
++config GRKERNSEC_PROC_ADD
++ bool "Additional restrictions"
++ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
++ help
++ If you say Y here, additional restrictions will be placed on
++ /proc that keep normal users from viewing device information and
++ slabinfo information that could be useful for exploits.
++
++config GRKERNSEC_LINK
++ bool "Linking restrictions"
++ help
++ If you say Y here, /tmp race exploits will be prevented, since users
++ will no longer be able to follow symlinks owned by other users in
++ world-writable +t directories (e.g. /tmp), unless the owner of the
++ symlink is the owner of the directory. Users will also not be
++ able to hardlink to files they do not own. If the sysctl option is
++ enabled, a sysctl option with name "linking_restrictions" is created.
++
++config GRKERNSEC_FIFO
++ bool "FIFO restrictions"
++ help
++ If you say Y here, users will not be able to write to FIFOs they don't
++ own in world-writable +t directories (e.g. /tmp), unless the owner of
++ the FIFO is also the owner of the directory it resides in. If the sysctl
++ option is enabled, a sysctl option with name "fifo_restrictions" is
++ created.
++
++config GRKERNSEC_ROFS
++ bool "Runtime read-only mount protection"
++ help
++ If you say Y here, a sysctl option with name "romount_protect" will
++ be created. By setting this option to 1 at runtime, filesystems
++ will be protected in the following ways:
++ * No new writable mounts will be allowed
++ * Existing read-only mounts cannot be remounted read/write
++ * Write operations will be denied on all block devices
++ This option acts independently of grsec_lock: once it is set to 1,
++ it cannot be turned off. Therefore, please be mindful of the resulting
++ behavior if this option is enabled in an init script on a read-only
++ filesystem. This feature is mainly intended for secure embedded systems.
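++
++ For example, assuming sysctl support is enabled, an init script run at
++ the end of boot could flip the one-way switch like this (the path
++ follows the layout described under "Sysctl support" below):
++
++     echo 1 > /proc/sys/kernel/grsecurity/romount_protect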
++
++config GRKERNSEC_CHROOT
++ bool "Chroot jail restrictions"
++ help
++ If you say Y here, you will be able to choose several options that will
++ make breaking out of a chrooted jail much more difficult. If you
++ encounter no software incompatibilities with the following options, it
++ is recommended that you enable each one.
++
++config GRKERNSEC_CHROOT_MOUNT
++ bool "Deny mounts"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ mount or remount filesystems. If the sysctl option is enabled, a
++ sysctl option with name "chroot_deny_mount" is created.
++
++config GRKERNSEC_CHROOT_DOUBLE
++ bool "Deny double-chroots"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to chroot
++ again in order to escape the original jail. This is a widely used method
++ of breaking out of a chroot jail and should not be allowed. If the sysctl
++ option is enabled, a sysctl option with name
++ "chroot_deny_chroot" is created.
++
++config GRKERNSEC_CHROOT_PIVOT
++ bool "Deny pivot_root in chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to use
++ a function called pivot_root() that was introduced in Linux 2.3.41. It
++ works similarly to chroot in that it changes the root filesystem. This
++ function could be misused in a chrooted process to attempt to break out
++ of the chroot, and therefore should not be allowed. If the sysctl
++ option is enabled, a sysctl option with name "chroot_deny_pivot" is
++ created.
++
++config GRKERNSEC_CHROOT_CHDIR
++ bool "Enforce chdir(\"/\") on all chroots"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, the current working directory of all newly-chrooted
++ applications will be set to the root directory of the chroot.
++ The man page on chroot(2) states:
++ Note that this call does not change the current working
++ directory, so that `.' can be outside the tree rooted at
++ `/'. In particular, the super-user can escape from a
++ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
++
++ It is recommended that you say Y here, since it's not known to break
++ any software. If the sysctl option is enabled, a sysctl option with
++ name "chroot_enforce_chdir" is created.
++
++config GRKERNSEC_CHROOT_CHMOD
++ bool "Deny (f)chmod +s"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to chmod
++ or fchmod files to make them have suid or sgid bits. This protects
++ against another published method of breaking a chroot. If the sysctl
++ option is enabled, a sysctl option with name "chroot_deny_chmod" is
++ created.
++
++config GRKERNSEC_CHROOT_FCHDIR
++ bool "Deny fchdir out of chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, a well-known method of breaking chroots by fchdir'ing
++ to a file descriptor of the chrooting process that points to a directory
++ outside the chroot will be stopped. If the sysctl option
++ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
++
++config GRKERNSEC_CHROOT_MKNOD
++ bool "Deny mknod"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be allowed to
++ mknod. The problem with using mknod inside a chroot is that it
++ would allow an attacker to create a device entry that is the same
++ as one on the physical root of your system, which could be anything
++ from the console device to a device for your hard drive (which
++ they could then use to wipe the drive or steal data). It is recommended
++ that you say Y here, unless you run into software incompatibilities.
++ If the sysctl option is enabled, a sysctl option with name
++ "chroot_deny_mknod" is created.
++
++config GRKERNSEC_CHROOT_SHMAT
++ bool "Deny shmat() out of chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to attach
++ to shared memory segments that were created outside of the chroot jail.
++ It is recommended that you say Y here. If the sysctl option is enabled,
++ a sysctl option with name "chroot_deny_shmat" is created.
++
++config GRKERNSEC_CHROOT_UNIX
++ bool "Deny access to abstract AF_UNIX sockets out of chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ connect to abstract (meaning not belonging to a filesystem) Unix
++ domain sockets that were bound outside of a chroot. It is recommended
++ that you say Y here. If the sysctl option is enabled, a sysctl option
++ with name "chroot_deny_unix" is created.
++
++config GRKERNSEC_CHROOT_FINDTASK
++ bool "Protect outside processes"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
++ getsid, or view any process outside of the chroot. If the sysctl
++ option is enabled, a sysctl option with name "chroot_findtask" is
++ created.
++
++config GRKERNSEC_CHROOT_NICE
++ bool "Restrict priority changes"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to raise
++ the priority of processes in the chroot, or alter the priority of
++ processes outside the chroot. This provides more security than simply
++ removing CAP_SYS_NICE from the process' capability set. If the
++ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
++ is created.
++
++config GRKERNSEC_CHROOT_SYSCTL
++ bool "Deny sysctl writes"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, an attacker in a chroot will not be able to
++ write to sysctl entries, either by sysctl(2) or through a /proc
++ interface. It is strongly recommended that you say Y here. If the
++ sysctl option is enabled, a sysctl option with name
++ "chroot_deny_sysctl" is created.
++
++config GRKERNSEC_CHROOT_CAPS
++ bool "Capability restrictions"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, the capabilities on all root processes within a
++ chroot jail will be lowered to stop module insertion, raw I/O,
++ system and net admin tasks, rebooting the system, modifying immutable
++ files, modifying IPC owned by another user, and changing the system time.
++ This is left as an option because it can break some applications. Disable
++ this if your chrooted applications are having problems performing those kinds of
++ tasks. If the sysctl option is enabled, a sysctl option with
++ name "chroot_caps" is created.
++
++endmenu
++menu "Kernel Auditing"
++depends on GRKERNSEC
++
++config GRKERNSEC_AUDIT_GROUP
++ bool "Single group for auditing"
++ help
++ If you say Y here, the exec, chdir, and (un)mount logging features
++ will only operate on a group you specify. This option is recommended
++ if you only want to watch certain users instead of having a large
++ amount of logs from the entire system. If the sysctl option is enabled,
++ a sysctl option with name "audit_group" is created.
++
++config GRKERNSEC_AUDIT_GID
++ int "GID for auditing"
++ depends on GRKERNSEC_AUDIT_GROUP
++ default 1007
++
++config GRKERNSEC_EXECLOG
++ bool "Exec logging"
++ help
++ If you say Y here, all execve() calls will be logged (since the
++ other exec*() calls are frontends to execve(), all execution
++ will be logged). Useful for shell-servers that like to keep track
++ of their users. If the sysctl option is enabled, a sysctl option with
++ name "exec_logging" is created.
++ WARNING: When enabled, this option will produce a LOT of logs, especially
++ on an active system.
++
++config GRKERNSEC_RESLOG
++ bool "Resource logging"
++ help
++ If you say Y here, all attempts to overstep resource limits will
++ be logged with the resource name, the requested size, and the current
++ limit. It is highly recommended that you say Y here. If the sysctl
++ option is enabled, a sysctl option with name "resource_logging" is
++ created. If the RBAC system is enabled, the sysctl value is ignored.
++
++config GRKERNSEC_CHROOT_EXECLOG
++ bool "Log execs within chroot"
++ help
++ If you say Y here, all executions inside a chroot jail will be logged
++ to syslog. This can produce a large volume of logs if certain
++ applications (e.g. djb's daemontools) are installed on the system, and
++ is therefore left as an option. If the sysctl option is enabled, a
++ sysctl option with name "chroot_execlog" is created.
++
++config GRKERNSEC_AUDIT_CHDIR
++ bool "Chdir logging"
++ help
++ If you say Y here, all chdir() calls will be logged. If the sysctl
++ option is enabled, a sysctl option with name "audit_chdir" is created.
++
++config GRKERNSEC_AUDIT_MOUNT
++ bool "(Un)Mount logging"
++ help
++ If you say Y here, all mounts and unmounts will be logged. If the
++ sysctl option is enabled, a sysctl option with name "audit_mount" is
++ created.
++
++config GRKERNSEC_SIGNAL
++ bool "Signal logging"
++ help
++ If you say Y here, certain important signals will be logged, such as
++ SIGSEGV, which will as a result inform you when an error occurred in a
++ program, which in some cases could indicate a possible exploit attempt.
++ If the sysctl option is enabled, a sysctl option with name
++ "signal_logging" is created.
++
++config GRKERNSEC_FORKFAIL
++ bool "Fork failure logging"
++ help
++ If you say Y here, all failed fork() attempts will be logged.
++ This could suggest a fork bomb, or someone attempting to overstep
++ their process limit. If the sysctl option is enabled, a sysctl option
++ with name "forkfail_logging" is created.
++
++config GRKERNSEC_TIME
++ bool "Time change logging"
++ help
++ If you say Y here, any changes of the system clock will be logged.
++ If the sysctl option is enabled, a sysctl option with name
++ "timechange_logging" is created.
++
++config GRKERNSEC_PROC_IPADDR
++ bool "/proc/<pid>/ipaddr support"
++ help
++ If you say Y here, a new entry will be added to each /proc/<pid>
++ directory that contains the IP address of the person using the task.
++ The IP is carried across local TCP and AF_UNIX stream sockets.
++ This information can be useful for IDS/IPSes to perform remote response
++ to a local attack. The entry is readable only by the owner of the
++ process (and by root, if root has CAP_DAC_OVERRIDE, which can be removed
++ via the RBAC system), and thus does not create privacy concerns.
++
++config GRKERNSEC_AUDIT_TEXTREL
++ bool 'ELF text relocations logging (READ HELP)'
++ depends on PAX_MPROTECT
++ help
++ If you say Y here, text relocations will be logged with the filename
++ of the offending library or binary. The purpose of the feature is
++ to help Linux distribution developers get rid of libraries and
++ binaries that need text relocations which hinder the future progress
++ of PaX. Only Linux distribution developers should say Y here, and
++ never on a production machine, as this option creates an information
++ leak that could aid an attacker in defeating the randomization of
++ a single memory region. If the sysctl option is enabled, a sysctl
++ option with name "audit_textrel" is created.
++
++endmenu
++
++menu "Executable Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_EXECVE
++ bool "Enforce RLIMIT_NPROC on execs"
++ help
++ If you say Y here, users with a resource limit on processes will
++ have the limit checked during execve() calls. The stock kernel
++ only checks the limit during fork() calls. If the sysctl option
++ is enabled, a sysctl option with name "execve_limiting" is created.
++
++config GRKERNSEC_DMESG
++ bool "Dmesg(8) restriction"
++ help
++ If you say Y here, non-root users will not be able to use dmesg(8)
++ to view up to the last 4kb of messages in the kernel's log buffer.
++ If the sysctl option is enabled, a sysctl option with name "dmesg" is
++ created.
++
++config GRKERNSEC_HARDEN_PTRACE
++ bool "Deter ptrace-based process snooping"
++ help
++ If you say Y here, TTY sniffers and other malicious monitoring
++ programs implemented through ptrace will be defeated. If you
++ have been using the RBAC system, this option has already been
++ enabled for several years for all users, with the ability to make
++ fine-grained exceptions.
++
++ This option only affects the ability of non-root users to ptrace
++ processes that are not a descendant of the ptracing process.
++ This means that strace ./binary and gdb ./binary will still work,
++ but attaching to arbitrary processes will not. If the sysctl
++ option is enabled, a sysctl option with name "harden_ptrace" is
++ created.
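++
++ In practice, for a non-root user this means (the PID below is only a
++ placeholder for some unrelated process):
++
++     strace ./binary    # still works: the target is a descendant
++     gdb -p 4242        # attaching to an unrelated process is denied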
++
++config GRKERNSEC_TPE
++ bool "Trusted Path Execution (TPE)"
++ help
++ If you say Y here, you will be able to choose a GID to add to the
++ supplementary groups of users you want to mark as "untrusted."
++ These users will not be able to execute any files that are not in
++ root-owned directories writable only by root. If the sysctl option
++ is enabled, a sysctl option with name "tpe" is created.
++
++config GRKERNSEC_TPE_ALL
++ bool "Partially restrict non-root users"
++ depends on GRKERNSEC_TPE
++ help
++ If you say Y here, all non-root users other than the ones in the
++ group specified in the main TPE option will only be allowed to
++ execute files in directories they own that are not group or
++ world-writable, or in directories owned by root and writable only by
++ root. If the sysctl option is enabled, a sysctl option with name
++ "tpe_restrict_all" is created.
++
++config GRKERNSEC_TPE_INVERT
++ bool "Invert GID option"
++ depends on GRKERNSEC_TPE
++ help
++ If you say Y here, the GID you specify in the TPE configuration will
++ instead decide what group TPE restrictions will be *disabled* for. This
++ option is useful if you want TPE restrictions to be applied to most
++ users on the system.
++
++config GRKERNSEC_TPE_GID
++ int "GID for untrusted users"
++ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
++ default 1005
++ help
++ If you have selected the "Invert GID option" above, setting this
++ GID determines what group TPE restrictions will be *disabled* for.
++ If you have not selected the "Invert GID option" above, setting this
++ GID determines what group TPE restrictions will be *enabled* for.
++ If the sysctl option is enabled, a sysctl option with name "tpe_gid"
++ is created.
++
++config GRKERNSEC_TPE_GID
++ int "GID for trusted users"
++ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
++ default 1005
++ help
++ If you have selected the "Invert GID option" above, setting this
++ GID determines what group TPE restrictions will be *disabled* for.
++ If you have not selected the "Invert GID option" above, setting this
++ GID determines what group TPE restrictions will be *enabled* for.
++ If the sysctl option is enabled, a sysctl option with name "tpe_gid"
++ is created.
++
++endmenu
++menu "Network Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_RANDNET
++ bool "Larger entropy pools"
++ help
++ If you say Y here, the entropy pools used for many features of Linux
++ and grsecurity will be doubled in size. Since several grsecurity
++ features use additional randomness, it is recommended that you say Y
++ here. Saying Y here has a similar effect to modifying
++ /proc/sys/kernel/random/poolsize.
++
++config GRKERNSEC_BLACKHOLE
++ bool "TCP/UDP blackhole"
++ help
++ If you say Y here, neither TCP resets nor ICMP
++ destination-unreachable packets will be sent in response to packets
++ sent to ports for which no associated listening process exists.
++ This feature supports both IPv4 and IPv6 and exempts the
++ loopback interface from blackholing. Enabling this feature
++ makes a host more resilient to DoS attacks and reduces network
++ visibility against scanners.
++
++config GRKERNSEC_SOCKET
++ bool "Socket restrictions"
++ help
++ If you say Y here, you will be able to choose from several options.
++ If you assign a GID on your system and add it to the supplementary
++ groups of users you want to restrict socket access to, this patch
++ will perform up to three things, based on the option(s) you choose.
++
++config GRKERNSEC_SOCKET_ALL
++ bool "Deny any sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to connect to other hosts from your machine or run server
++ applications from your machine. If the sysctl option is enabled, a
++ sysctl option with name "socket_all" is created.
++
++config GRKERNSEC_SOCKET_ALL_GID
++ int "GID to deny all sockets for"
++ depends on GRKERNSEC_SOCKET_ALL
++ default 1004
++ help
++ Here you can choose the GID to disable socket access for. Remember to
++ add the users you want socket access disabled for to the GID
++ specified here. If the sysctl option is enabled, a sysctl option
++ with name "socket_all_gid" is created.
++
++config GRKERNSEC_SOCKET_CLIENT
++ bool "Deny client sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to connect to other hosts from your machine, but will be
++ able to run servers. If this option is enabled, all users in the group
++ you specify will have to use passive mode when initiating ftp transfers
++ from the shell on your machine. If the sysctl option is enabled, a
++ sysctl option with name "socket_client" is created.
++
++config GRKERNSEC_SOCKET_CLIENT_GID
++ int "GID to deny client sockets for"
++ depends on GRKERNSEC_SOCKET_CLIENT
++ default 1003
++ help
++ Here you can choose the GID to disable client socket access for.
++ Remember to add the users you want client socket access disabled for to
++ the GID specified here. If the sysctl option is enabled, a sysctl
++ option with name "socket_client_gid" is created.
++
++config GRKERNSEC_SOCKET_SERVER
++ bool "Deny server sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to run server applications from your machine. If the sysctl
++ option is enabled, a sysctl option with name "socket_server" is created.
++
++config GRKERNSEC_SOCKET_SERVER_GID
++ int "GID to deny server sockets for"
++ depends on GRKERNSEC_SOCKET_SERVER
++ default 1002
++ help
++ Here you can choose the GID to disable server socket access for.
++ Remember to add the users you want server socket access disabled for to
++ the GID specified here. If the sysctl option is enabled, a sysctl
++ option with name "socket_server_gid" is created.
++
++endmenu
++menu "Sysctl support"
++depends on GRKERNSEC && SYSCTL
++
++config GRKERNSEC_SYSCTL
++ bool "Sysctl support"
++ help
++ If you say Y here, you will be able to change the options that
++ grsecurity runs with at bootup, without having to recompile your
++ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
++ to enable (1) or disable (0) various features. All the sysctl entries
++ are mutable until the "grsec_lock" entry is set to a non-zero value.
++ All features enabled in the kernel configuration are disabled at boot
++ if you do not say Y to the "Turn on features by default" option.
++ All options should be set at startup, and the grsec_lock entry should
++ be set to a non-zero value after all the options are set.
++ *THIS IS EXTREMELY IMPORTANT*
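++
++ A minimal sketch of such a startup sequence, using sysctl names
++ documented elsewhere in this file (any grsecurity entry may be
++ substituted):
++
++     echo 1 > /proc/sys/kernel/grsecurity/chroot_deny_mount
++     echo 1 > /proc/sys/kernel/grsecurity/dmesg
++     # lock all grsecurity sysctl entries until the next reboot
++     echo 1 > /proc/sys/kernel/grsecurity/grsec_lock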
++
++config GRKERNSEC_SYSCTL_ON
++ bool "Turn on features by default"
++ depends on GRKERNSEC_SYSCTL
++ help
++ If you say Y here, the features enabled in the kernel configuration
++ will be turned on at boot time instead of starting out disabled.
++ It is recommended you say Y here unless
++ there is some reason you would want all sysctl-tunable features to
++ be disabled by default. As mentioned elsewhere, it is important
++ to enable the grsec_lock entry once you have finished modifying
++ the sysctl entries.
++
++endmenu
++menu "Logging Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_FLOODTIME
++ int "Seconds in between log messages (minimum)"
++ default 10
++ help
++ This option allows you to enforce the number of seconds between
++ grsecurity log messages. The default should be suitable for most
++ people; however, if you choose to change it, choose a value small enough
++ to allow informative logs to be produced, but large enough to
++ prevent flooding.
++
++config GRKERNSEC_FLOODBURST
++ int "Number of messages in a burst (maximum)"
++ default 4
++ help
++ This option allows you to choose the maximum number of messages allowed
++ within the flood time interval you chose in a separate option. The
++ default should be suitable for most people; however, if you find that
++ many of your logs are being interpreted as flooding, you may want to
++ raise this value.
++
++endmenu
++
++endmenu
+diff -urNp linux-2.6.31.7/grsecurity/Makefile linux-2.6.31.7/grsecurity/Makefile
+--- linux-2.6.31.7/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/grsecurity/Makefile 2009-12-08 17:39:44.224728422 -0500
+@@ -0,0 +1,29 @@
++# grsecurity's ACL system was originally written in 2001 by Michael Dalton
++# during 2001-2009 it was completely redesigned by Brad Spengler
++# into an RBAC system
++#
++# All code in this directory and various hooks inserted throughout the kernel
++# are copyright Brad Spengler - Open Source Security, Inc., and released
++# under the GPL v2 or higher
++
++obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
++ grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
++ grsec_time.o grsec_tpe.o grsec_link.o grsec_textrel.o
++
++obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_ip.o gracl_segv.o \
++ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
++ gracl_learn.o grsec_log.o
++obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
++
++ifndef CONFIG_GRKERNSEC
++obj-y += grsec_disabled.o
++endif
++
++ifdef CONFIG_GRKERNSEC_HIDESYM
++extra-y := grsec_hidesym.o
++$(obj)/grsec_hidesym.o:
++ @-chmod -f 500 /boot
++ @-chmod -f 500 /lib/modules
++ @-chmod -f 700 .
++ @echo ' grsec: protected kernel image paths'
++endif
+diff -urNp linux-2.6.31.7/include/acpi/acpi_drivers.h linux-2.6.31.7/include/acpi/acpi_drivers.h
+--- linux-2.6.31.7/include/acpi/acpi_drivers.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/acpi/acpi_drivers.h 2009-12-08 17:39:44.231808097 -0500
+@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
+ Dock Station
+ -------------------------------------------------------------------------- */
+ struct acpi_dock_ops {
+- acpi_notify_handler handler;
+- acpi_notify_handler uevent;
++ const acpi_notify_handler handler;
++ const acpi_notify_handler uevent;
+ };
+
+ #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
+@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
+ extern int register_dock_notifier(struct notifier_block *nb);
+ extern void unregister_dock_notifier(struct notifier_block *nb);
+ extern int register_hotplug_dock_device(acpi_handle handle,
+- struct acpi_dock_ops *ops,
++ const struct acpi_dock_ops *ops,
+ void *context);
+ extern void unregister_hotplug_dock_device(acpi_handle handle);
+ #else
+@@ -144,7 +144,7 @@ static inline void unregister_dock_notif
+ {
+ }
+ static inline int register_hotplug_dock_device(acpi_handle handle,
+- struct acpi_dock_ops *ops,
++ const struct acpi_dock_ops *ops,
+ void *context)
+ {
+ return -ENODEV;
+diff -urNp linux-2.6.31.7/include/asm-generic/atomic.h linux-2.6.31.7/include/asm-generic/atomic.h
+--- linux-2.6.31.7/include/asm-generic/atomic.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/asm-generic/atomic.h 2009-12-08 17:39:44.231808097 -0500
+@@ -36,6 +36,15 @@
+ #define atomic_read(v) ((v)->counter)
+
+ /**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v. Note that the guaranteed
++ * useful range of an atomic_unchecked_t is only 24 bits.
++ */
++#define atomic_read_unchecked(v) ((v)->counter)
++
++/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+@@ -45,6 +54,16 @@
+ */
+ #define atomic_set(v, i) (((v)->counter) = (i))
+
++/**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i. Note that the guaranteed
++ * useful range of an atomic_unchecked_t is only 24 bits.
++ */
++#define atomic_set_unchecked(v, i) (((v)->counter) = (i))
++
+ #include <asm/system.h>
+
+ /**
+@@ -101,16 +120,31 @@ static inline void atomic_add(int i, ato
+ atomic_add_return(i, v);
+ }
+
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_add_return(i, (atomic_t *)v);
++}
++
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+ atomic_sub_return(i, v);
+ }
+
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ atomic_sub_return(i, (atomic_t *)v);
++}
++
+ static inline void atomic_inc(atomic_t *v)
+ {
+ atomic_add_return(1, v);
+ }
+
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_return(1, (atomic_t *)v);
++}
++
+ static inline void atomic_dec(atomic_t *v)
+ {
+ atomic_sub_return(1, v);
+diff -urNp linux-2.6.31.7/include/asm-generic/dma-mapping-common.h linux-2.6.31.7/include/asm-generic/dma-mapping-common.h
+--- linux-2.6.31.7/include/asm-generic/dma-mapping-common.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/asm-generic/dma-mapping-common.h 2009-12-08 17:39:44.231808097 -0500
+@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(ptr, size);
+@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_page)
+@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ int i, ents;
+ struct scatterlist *s;
+
+@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(page_address(page) + offset, size);
+@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
+ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_page)
+@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
+ size_t size,
+ enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_cpu)
+@@ -110,7 +110,7 @@ static inline void dma_sync_single_for_d
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_device)
+@@ -125,7 +125,7 @@ static inline void dma_sync_single_range
+ size_t size,
+ enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_range_for_cpu) {
+@@ -143,7 +143,7 @@ static inline void dma_sync_single_range
+ size_t size,
+ enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_range_for_device) {
+@@ -159,7 +159,7 @@ static inline void
+ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_sg_for_cpu)
+@@ -172,7 +172,7 @@ static inline void
+ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_sg_for_device)
+diff -urNp linux-2.6.31.7/include/asm-generic/futex.h linux-2.6.31.7/include/asm-generic/futex.h
+--- linux-2.6.31.7/include/asm-generic/futex.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/asm-generic/futex.h 2009-12-08 17:39:44.232811641 -0500
+@@ -6,7 +6,7 @@
+ #include <asm/errno.h>
+
+ static inline int
+-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
++futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+ {
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
+ }
+
+ static inline int
+-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
++futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
+ {
+ return -ENOSYS;
+ }
+diff -urNp linux-2.6.31.7/include/asm-generic/int-l64.h linux-2.6.31.7/include/asm-generic/int-l64.h
+--- linux-2.6.31.7/include/asm-generic/int-l64.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/asm-generic/int-l64.h 2009-12-08 17:39:44.232811641 -0500
+@@ -46,6 +46,8 @@ typedef unsigned int u32;
+ typedef signed long s64;
+ typedef unsigned long u64;
+
++typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
++
+ #define S8_C(x) x
+ #define U8_C(x) x ## U
+ #define S16_C(x) x
+diff -urNp linux-2.6.31.7/include/asm-generic/int-ll64.h linux-2.6.31.7/include/asm-generic/int-ll64.h
+--- linux-2.6.31.7/include/asm-generic/int-ll64.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/asm-generic/int-ll64.h 2009-12-08 17:39:44.232811641 -0500
+@@ -51,6 +51,8 @@ typedef unsigned int u32;
+ typedef signed long long s64;
+ typedef unsigned long long u64;
+
++typedef unsigned long long intoverflow_t;
++
+ #define S8_C(x) x
+ #define U8_C(x) x ## U
+ #define S16_C(x) x
+diff -urNp linux-2.6.31.7/include/asm-generic/kmap_types.h linux-2.6.31.7/include/asm-generic/kmap_types.h
+--- linux-2.6.31.7/include/asm-generic/kmap_types.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/asm-generic/kmap_types.h 2009-12-08 17:39:44.232811641 -0500
+@@ -27,7 +27,8 @@ D(15) KM_UML_USERCOPY, /* UML specific,
+ D(16) KM_IRQ_PTE,
+ D(17) KM_NMI,
+ D(18) KM_NMI_PTE,
+-D(19) KM_TYPE_NR
++D(19) KM_CLEARPAGE,
++D(20) KM_TYPE_NR
+ };
+
+ #undef D
+diff -urNp linux-2.6.31.7/include/asm-generic/pgtable.h linux-2.6.31.7/include/asm-generic/pgtable.h
+--- linux-2.6.31.7/include/asm-generic/pgtable.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/asm-generic/pgtable.h 2009-12-08 17:39:44.233808850 -0500
+@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
+ unsigned long size);
+ #endif
+
++#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
++static inline unsigned long pax_open_kernel(void) { return 0; }
++#endif
++
++#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* _ASM_GENERIC_PGTABLE_H */
+diff -urNp linux-2.6.31.7/include/asm-generic/vmlinux.lds.h linux-2.6.31.7/include/asm-generic/vmlinux.lds.h
+--- linux-2.6.31.7/include/asm-generic/vmlinux.lds.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/asm-generic/vmlinux.lds.h 2009-12-08 17:39:44.233808850 -0500
+@@ -201,6 +201,7 @@
+ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_rodata) = .; \
+ *(.rodata) *(.rodata.*) \
++ *(.data.read_only) \
+ *(__vermagic) /* Kernel version magic */ \
+ *(__markers_strings) /* Markers: strings */ \
+ *(__tracepoints_strings)/* Tracepoints: strings */ \
+@@ -641,22 +642,24 @@
+ * section in the linker script will go there too. @phdr should have
+ * a leading colon.
+ *
+- * Note that this macros defines __per_cpu_load as an absolute symbol.
++ * Note that this macro defines per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU().
+ */
+ #define PERCPU_VADDR(vaddr, phdr) \
+- VMLINUX_SYMBOL(__per_cpu_load) = .; \
+- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
++ per_cpu_load = .; \
++ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
+ - LOAD_OFFSET) { \
++ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
+ VMLINUX_SYMBOL(__per_cpu_start) = .; \
+ *(.data.percpu.first) \
+- *(.data.percpu.page_aligned) \
+ *(.data.percpu) \
++ . = ALIGN(PAGE_SIZE); \
++ *(.data.percpu.page_aligned) \
+ *(.data.percpu.shared_aligned) \
+ VMLINUX_SYMBOL(__per_cpu_end) = .; \
+ } phdr \
+- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
++ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
+
+ /**
+ * PERCPU - define output section for percpu area, simple version
+diff -urNp linux-2.6.31.7/include/drm/drm_pciids.h linux-2.6.31.7/include/drm/drm_pciids.h
+--- linux-2.6.31.7/include/drm/drm_pciids.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/drm/drm_pciids.h 2009-12-08 17:39:44.234813912 -0500
+@@ -375,7 +375,7 @@
+ {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+
+ #define r128_PCI_IDS \
+ {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+@@ -415,14 +415,14 @@
+ {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+
+ #define mga_PCI_IDS \
+ {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+ {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+ {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
+ {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+
+ #define mach64_PCI_IDS \
+ {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+@@ -445,7 +445,7 @@
+ {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+
+ #define sisdrv_PCI_IDS \
+ {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+@@ -456,7 +456,7 @@
+ {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
+ {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+
+ #define tdfx_PCI_IDS \
+ {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+@@ -465,7 +465,7 @@
+ {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+
+ #define viadrv_PCI_IDS \
+ {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+@@ -477,14 +477,14 @@
+ {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
+ {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+
+ #define i810_PCI_IDS \
+ {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+
+ #define i830_PCI_IDS \
+ {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+@@ -492,11 +492,11 @@
+ {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x358e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+
+ #define gamma_PCI_IDS \
+ {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+
+ #define savage_PCI_IDS \
+ {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+@@ -522,10 +522,10 @@
+ {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+ {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
+ {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+
+ #define ffb_PCI_IDS \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+
+ #define i915_PCI_IDS \
+ {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+@@ -558,4 +558,4 @@
+ {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+ {0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+ {0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+- {0, 0, 0}
++ {0, 0, 0, 0, 0, 0}
+diff -urNp linux-2.6.31.7/include/drm/drmP.h linux-2.6.31.7/include/drm/drmP.h
+--- linux-2.6.31.7/include/drm/drmP.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/drm/drmP.h 2009-12-08 17:39:44.233808850 -0500
+@@ -787,7 +787,7 @@ struct drm_driver {
+ void (*gem_free_object) (struct drm_gem_object *obj);
+
+ /* Driver private ops for this object */
+- struct vm_operations_struct *gem_vm_ops;
++ const struct vm_operations_struct *gem_vm_ops;
+
+ int major;
+ int minor;
+@@ -890,7 +890,7 @@ struct drm_device {
+
+ /** \name Usage Counters */
+ /*@{ */
+- int open_count; /**< Outstanding files open */
++ atomic_t open_count; /**< Outstanding files open */
+ atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
+ atomic_t vma_count; /**< Outstanding vma areas open */
+ int buf_use; /**< Buffers in use -- cannot alloc */
+@@ -901,7 +901,7 @@ struct drm_device {
+ /*@{ */
+ unsigned long counters;
+ enum drm_stat_type types[15];
+- atomic_t counts[15];
++ atomic_unchecked_t counts[15];
+ /*@} */
+
+ struct list_head filelist;
+diff -urNp linux-2.6.31.7/include/linux/agp_backend.h linux-2.6.31.7/include/linux/agp_backend.h
+--- linux-2.6.31.7/include/linux/agp_backend.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/agp_backend.h 2009-12-08 17:39:44.234813912 -0500
+@@ -53,7 +53,7 @@ struct agp_kern_info {
+ int current_memory;
+ bool cant_use_aperture;
+ unsigned long page_mask;
+- struct vm_operations_struct *vm_ops;
++ const struct vm_operations_struct *vm_ops;
+ };
+
+ /*
+diff -urNp linux-2.6.31.7/include/linux/a.out.h linux-2.6.31.7/include/linux/a.out.h
+--- linux-2.6.31.7/include/linux/a.out.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/a.out.h 2009-12-08 17:39:44.234813912 -0500
+@@ -39,6 +39,14 @@ enum machine_type {
+ M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
+ };
+
++/* Constants for the N_FLAGS field */
++#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
++/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
++#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
++
+ #if !defined (N_MAGIC)
+ #define N_MAGIC(exec) ((exec).a_info & 0xffff)
+ #endif
+diff -urNp linux-2.6.31.7/include/linux/atmdev.h linux-2.6.31.7/include/linux/atmdev.h
+--- linux-2.6.31.7/include/linux/atmdev.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/atmdev.h 2009-12-08 17:39:44.235810884 -0500
+@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
+ #endif
+
+ struct k_atm_aal_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ };
+diff -urNp linux-2.6.31.7/include/linux/backlight.h linux-2.6.31.7/include/linux/backlight.h
+--- linux-2.6.31.7/include/linux/backlight.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/backlight.h 2009-12-08 17:39:44.235810884 -0500
+@@ -31,18 +31,18 @@ struct backlight_device;
+ struct fb_info;
+
+ struct backlight_ops {
+- unsigned int options;
++ const unsigned int options;
+
+ #define BL_CORE_SUSPENDRESUME (1 << 0)
+
+ /* Notify the backlight driver some property has changed */
+- int (*update_status)(struct backlight_device *);
++ int (* const update_status)(struct backlight_device *);
+ /* Return the current backlight brightness (accounting for power,
+ fb_blank etc.) */
+- int (*get_brightness)(struct backlight_device *);
++ int (* const get_brightness)(struct backlight_device *);
+ /* Check if given framebuffer device is the one bound to this backlight;
+ return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
+- int (*check_fb)(struct fb_info *);
++ int (* const check_fb)(struct fb_info *);
+ };
+
+ /* This structure defines all the properties of a backlight */
+@@ -81,7 +81,7 @@ struct backlight_device {
+ registered this device has been unloaded, and if class_get_devdata()
+ points to something in the body of that driver, it is also invalid. */
+ struct mutex ops_lock;
+- struct backlight_ops *ops;
++ const struct backlight_ops *ops;
+
+ /* The framebuffer notifier block */
+ struct notifier_block fb_notif;
+@@ -98,7 +98,7 @@ static inline void backlight_update_stat
+ }
+
+ extern struct backlight_device *backlight_device_register(const char *name,
+- struct device *dev, void *devdata, struct backlight_ops *ops);
++ struct device *dev, void *devdata, const struct backlight_ops *ops);
+ extern void backlight_device_unregister(struct backlight_device *bd);
+
+ #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
+diff -urNp linux-2.6.31.7/include/linux/binfmts.h linux-2.6.31.7/include/linux/binfmts.h
+--- linux-2.6.31.7/include/linux/binfmts.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/binfmts.h 2009-12-08 17:39:44.235810884 -0500
+@@ -78,6 +78,7 @@ struct linux_binfmt {
+ int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
+ int (*load_shlib)(struct file *);
+ int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
++ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
+ unsigned long min_coredump; /* minimal dump size */
+ int hasvdso;
+ };
+diff -urNp linux-2.6.31.7/include/linux/cache.h linux-2.6.31.7/include/linux/cache.h
+--- linux-2.6.31.7/include/linux/cache.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/cache.h 2009-12-08 17:39:44.236660208 -0500
+@@ -16,6 +16,10 @@
+ #define __read_mostly
+ #endif
+
++#ifndef __read_only
++#define __read_only __read_mostly
++#endif
++
+ #ifndef ____cacheline_aligned
+ #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+ #endif
+diff -urNp linux-2.6.31.7/include/linux/capability.h linux-2.6.31.7/include/linux/capability.h
+--- linux-2.6.31.7/include/linux/capability.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/capability.h 2009-12-08 17:39:44.236660208 -0500
+@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
+ (security_real_capable_noaudit((t), (cap)) == 0)
+
+ extern int capable(int cap);
++int capable_nolog(int cap);
+
+ /* audit system wants to get cap info from files as well */
+ struct dentry;
+diff -urNp linux-2.6.31.7/include/linux/cgroup.h linux-2.6.31.7/include/linux/cgroup.h
+--- linux-2.6.31.7/include/linux/cgroup.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/cgroup.h 2009-12-08 17:39:44.236660208 -0500
+@@ -37,7 +37,7 @@ extern void cgroup_exit(struct task_stru
+ extern int cgroupstats_build(struct cgroupstats *stats,
+ struct dentry *dentry);
+
+-extern struct file_operations proc_cgroup_operations;
++extern const struct file_operations proc_cgroup_operations;
+
+ /* Define the enumeration of all cgroup subsystems */
+ #define SUBSYS(_x) _x ## _subsys_id,
+diff -urNp linux-2.6.31.7/include/linux/compiler-gcc4.h linux-2.6.31.7/include/linux/compiler-gcc4.h
+--- linux-2.6.31.7/include/linux/compiler-gcc4.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/compiler-gcc4.h 2009-12-08 17:39:44.236660208 -0500
+@@ -36,4 +36,8 @@
+ the kernel context */
+ #define __cold __attribute__((__cold__))
+
++#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
++#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
++#define __bos0(ptr) __bos((ptr), 0)
++#define __bos1(ptr) __bos((ptr), 1)
+ #endif
+diff -urNp linux-2.6.31.7/include/linux/compiler.h linux-2.6.31.7/include/linux/compiler.h
+--- linux-2.6.31.7/include/linux/compiler.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/compiler.h 2009-12-08 17:39:44.237805853 -0500
+@@ -256,6 +256,22 @@ void ftrace_likely_update(struct ftrace_
+ #define __cold
+ #endif
+
++#ifndef __alloc_size
++#define __alloc_size
++#endif
++
++#ifndef __bos
++#define __bos
++#endif
++
++#ifndef __bos0
++#define __bos0
++#endif
++
++#ifndef __bos1
++#define __bos1
++#endif
++
+ /* Simple shorthand for a section definition */
+ #ifndef __section
+ # define __section(S) __attribute__ ((__section__(#S)))
+diff -urNp linux-2.6.31.7/include/linux/cpumask.h linux-2.6.31.7/include/linux/cpumask.h
+--- linux-2.6.31.7/include/linux/cpumask.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/cpumask.h 2009-12-08 17:39:44.237805853 -0500
+@@ -142,7 +142,6 @@
+ #include <linux/bitmap.h>
+
+ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+-extern cpumask_t _unused_cpumask_arg_;
+
+ #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
+ #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
+diff -urNp linux-2.6.31.7/include/linux/decompress/mm.h linux-2.6.31.7/include/linux/decompress/mm.h
+--- linux-2.6.31.7/include/linux/decompress/mm.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/decompress/mm.h 2009-12-08 17:39:44.237805853 -0500
+@@ -68,7 +68,7 @@ static void free(void *where)
+ * warnings when not needed (indeed large_malloc / large_free are not
+ * needed by inflate */
+
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+
+ #define large_malloc(a) vmalloc(a)
+diff -urNp linux-2.6.31.7/include/linux/elf.h linux-2.6.31.7/include/linux/elf.h
+--- linux-2.6.31.7/include/linux/elf.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/elf.h 2009-12-08 17:39:44.237805853 -0500
+@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
+ #define PT_GNU_EH_FRAME 0x6474e550
+
+ #define PT_GNU_STACK (PT_LOOS + 0x474e551)
++#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
++
++#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
++
++/* Constants for the e_flags field */
++#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
++/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
++#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
+
+ /* These constants define the different elf file types */
+ #define ET_NONE 0
+@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
+ #define DT_DEBUG 21
+ #define DT_TEXTREL 22
+ #define DT_JMPREL 23
++#define DT_FLAGS 30
++ #define DF_TEXTREL 0x00000004
+ #define DT_ENCODING 32
+ #define OLD_DT_LOOS 0x60000000
+ #define DT_LOOS 0x6000000d
+@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
+ #define PF_W 0x2
+ #define PF_X 0x1
+
++#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
++#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
++#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
++#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
++#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
++#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
++/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
++/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
++#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
++#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
++#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
++#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
++
+ typedef struct elf32_phdr{
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
+ #define EI_OSABI 7
+ #define EI_PAD 8
+
++#define EI_PAX 14
++
+ #define ELFMAG0 0x7f /* EI_MAG */
+ #define ELFMAG1 'E'
+ #define ELFMAG2 'L'
+@@ -385,6 +413,7 @@ extern Elf32_Dyn _DYNAMIC [];
+ #define elf_phdr elf32_phdr
+ #define elf_note elf32_note
+ #define elf_addr_t Elf32_Off
++#define elf_dyn Elf32_Dyn
+
+ #else
+
+@@ -393,6 +422,7 @@ extern Elf64_Dyn _DYNAMIC [];
+ #define elf_phdr elf64_phdr
+ #define elf_note elf64_note
+ #define elf_addr_t Elf64_Off
++#define elf_dyn Elf64_Dyn
+
+ #endif
+
+diff -urNp linux-2.6.31.7/include/linux/fs.h linux-2.6.31.7/include/linux/fs.h
+--- linux-2.6.31.7/include/linux/fs.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/fs.h 2009-12-08 17:39:44.238810496 -0500
+@@ -87,6 +87,10 @@ struct inodes_stat_t {
+ */
+ #define FMODE_NOCMTIME ((__force fmode_t)2048)
+
++/* Hack for grsec so as not to require read permission simply to execute
++ a binary */
++#define FMODE_GREXEC ((__force fmode_t)8192)
++
+ /*
+ * The below are the various read and write types that we support. Some of
+ * them include behavioral modifiers that send information down to the
+@@ -1024,19 +1028,19 @@ static inline int file_check_writeable(s
+ typedef struct files_struct *fl_owner_t;
+
+ struct file_lock_operations {
+- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+- void (*fl_release_private)(struct file_lock *);
++ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
++ void (* const fl_release_private)(struct file_lock *);
+ };
+
+ struct lock_manager_operations {
+- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
+- void (*fl_notify)(struct file_lock *); /* unblock callback */
+- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
+- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+- void (*fl_release_private)(struct file_lock *);
+- void (*fl_break)(struct file_lock *);
+- int (*fl_mylease)(struct file_lock *, struct file_lock *);
+- int (*fl_change)(struct file_lock **, int);
++ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
++ void (* const fl_notify)(struct file_lock *); /* unblock callback */
++ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
++ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
++ void (* const fl_release_private)(struct file_lock *);
++ void (* const fl_break)(struct file_lock *);
++ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
++ int (* const fl_change)(struct file_lock **, int);
+ };
+
+ struct lock_manager {
+@@ -1067,8 +1071,8 @@ struct file_lock {
+ struct fasync_struct * fl_fasync; /* for lease break notifications */
+ unsigned long fl_break_time; /* for nonblocking lease breaks */
+
+- struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
+- struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
++ const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
++ const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
+ union {
+ struct nfs_lock_info nfs_fl;
+ struct nfs4_lock_info nfs4_fl;
+@@ -1435,7 +1439,7 @@ struct fiemap_extent_info {
+ unsigned int fi_flags; /* Flags as passed from user */
+ unsigned int fi_extents_mapped; /* Number of mapped extents */
+ unsigned int fi_extents_max; /* Size of fiemap_extent array */
+- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
++ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
+ * array */
+ };
+ int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
+@@ -2430,7 +2434,7 @@ static int __fops ## _open(struct inode
+ __simple_attr_check_format(__fmt, 0ull); \
+ return simple_attr_open(inode, file, __get, __set, __fmt); \
+ } \
+-static struct file_operations __fops = { \
++static const struct file_operations __fops = { \
+ .owner = THIS_MODULE, \
+ .open = __fops ## _open, \
+ .release = simple_attr_release, \
+diff -urNp linux-2.6.31.7/include/linux/fs_struct.h linux-2.6.31.7/include/linux/fs_struct.h
+--- linux-2.6.31.7/include/linux/fs_struct.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/fs_struct.h 2009-12-08 17:39:44.238810496 -0500
+@@ -4,7 +4,7 @@
+ #include <linux/path.h>
+
+ struct fs_struct {
+- int users;
++ atomic_t users;
+ rwlock_t lock;
+ int umask;
+ int in_exec;
+diff -urNp linux-2.6.31.7/include/linux/genhd.h linux-2.6.31.7/include/linux/genhd.h
+--- linux-2.6.31.7/include/linux/genhd.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/genhd.h 2009-12-08 17:39:44.239805301 -0500
+@@ -161,7 +161,7 @@ struct gendisk {
+
+ struct timer_rand_state *random;
+
+- atomic_t sync_io; /* RAID */
++ atomic_unchecked_t sync_io; /* RAID */
+ struct work_struct async_notify;
+ #ifdef CONFIG_BLK_DEV_INTEGRITY
+ struct blk_integrity *integrity;
+diff -urNp linux-2.6.31.7/include/linux/gracl.h linux-2.6.31.7/include/linux/gracl.h
+--- linux-2.6.31.7/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/include/linux/gracl.h 2009-12-08 17:39:44.239805301 -0500
+@@ -0,0 +1,309 @@
++#ifndef GR_ACL_H
++#define GR_ACL_H
++
++#include <linux/grdefs.h>
++#include <linux/resource.h>
++#include <linux/capability.h>
++#include <linux/dcache.h>
++#include <asm/resource.h>
++
++/* Major status information */
++
++#define GR_VERSION "grsecurity 2.1.14"
++#define GRSECURITY_VERSION 0x2114
++
++enum {
++ GR_SHUTDOWN = 0,
++ GR_ENABLE = 1,
++ GR_SPROLE = 2,
++ GR_RELOAD = 3,
++ GR_SEGVMOD = 4,
++ GR_STATUS = 5,
++ GR_UNSPROLE = 6,
++ GR_PASSSET = 7,
++ GR_SPROLEPAM = 8,
++};
++
++/* Password setup definitions
++ * kernel/grhash.c */
++enum {
++ GR_PW_LEN = 128,
++ GR_SALT_LEN = 16,
++ GR_SHA_LEN = 32,
++};
++
++enum {
++ GR_SPROLE_LEN = 64,
++};
++
++#define GR_NLIMITS 32
++
++/* Begin Data Structures */
++
++struct sprole_pw {
++ unsigned char *rolename;
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
++};
++
++struct name_entry {
++ __u32 key;
++ ino_t inode;
++ dev_t device;
++ char *name;
++ __u16 len;
++ __u8 deleted;
++ struct name_entry *prev;
++ struct name_entry *next;
++};
++
++struct inodev_entry {
++ struct name_entry *nentry;
++ struct inodev_entry *prev;
++ struct inodev_entry *next;
++};
++
++struct acl_role_db {
++ struct acl_role_label **r_hash;
++ __u32 r_size;
++};
++
++struct inodev_db {
++ struct inodev_entry **i_hash;
++ __u32 i_size;
++};
++
++struct name_db {
++ struct name_entry **n_hash;
++ __u32 n_size;
++};
++
++struct crash_uid {
++ uid_t uid;
++ unsigned long expires;
++};
++
++struct gr_hash_struct {
++ void **table;
++ void **nametable;
++ void *first;
++ __u32 table_size;
++ __u32 used_size;
++ int type;
++};
++
++/* Userspace Grsecurity ACL data structures */
++
++struct acl_subject_label {
++ char *filename;
++ ino_t inode;
++ dev_t device;
++ __u32 mode;
++ kernel_cap_t cap_mask;
++ kernel_cap_t cap_lower;
++
++ struct rlimit res[GR_NLIMITS];
++ __u32 resmask;
++
++ __u8 user_trans_type;
++ __u8 group_trans_type;
++ uid_t *user_transitions;
++ gid_t *group_transitions;
++ __u16 user_trans_num;
++ __u16 group_trans_num;
++
++ __u32 ip_proto[8];
++ __u32 ip_type;
++ struct acl_ip_label **ips;
++ __u32 ip_num;
++ __u32 inaddr_any_override;
++
++ __u32 crashes;
++ unsigned long expires;
++
++ struct acl_subject_label *parent_subject;
++ struct gr_hash_struct *hash;
++ struct acl_subject_label *prev;
++ struct acl_subject_label *next;
++
++ struct acl_object_label **obj_hash;
++ __u32 obj_hash_size;
++ __u16 pax_flags;
++};
++
++struct role_allowed_ip {
++ __u32 addr;
++ __u32 netmask;
++
++ struct role_allowed_ip *prev;
++ struct role_allowed_ip *next;
++};
++
++struct role_transition {
++ char *rolename;
++
++ struct role_transition *prev;
++ struct role_transition *next;
++};
++
++struct acl_role_label {
++ char *rolename;
++ uid_t uidgid;
++ __u16 roletype;
++
++ __u16 auth_attempts;
++ unsigned long expires;
++
++ struct acl_subject_label *root_label;
++ struct gr_hash_struct *hash;
++
++ struct acl_role_label *prev;
++ struct acl_role_label *next;
++
++ struct role_transition *transitions;
++ struct role_allowed_ip *allowed_ips;
++ uid_t *domain_children;
++ __u16 domain_child_num;
++
++ struct acl_subject_label **subj_hash;
++ __u32 subj_hash_size;
++};
++
++struct user_acl_role_db {
++ struct acl_role_label **r_table;
++ __u32 num_pointers; /* Number of allocations to track */
++ __u32 num_roles; /* Number of roles */
++ __u32 num_domain_children; /* Number of domain children */
++ __u32 num_subjects; /* Number of subjects */
++ __u32 num_objects; /* Number of objects */
++};
++
++struct acl_object_label {
++ char *filename;
++ ino_t inode;
++ dev_t device;
++ __u32 mode;
++
++ struct acl_subject_label *nested;
++ struct acl_object_label *globbed;
++
++ /* next two structures not used */
++
++ struct acl_object_label *prev;
++ struct acl_object_label *next;
++};
++
++struct acl_ip_label {
++ char *iface;
++ __u32 addr;
++ __u32 netmask;
++ __u16 low, high;
++ __u8 mode;
++ __u32 type;
++ __u32 proto[8];
++
++ /* next two structures not used */
++
++ struct acl_ip_label *prev;
++ struct acl_ip_label *next;
++};
++
++struct gr_arg {
++ struct user_acl_role_db role_db;
++ unsigned char pw[GR_PW_LEN];
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN];
++ unsigned char sp_role[GR_SPROLE_LEN];
++ struct sprole_pw *sprole_pws;
++ dev_t segv_device;
++ ino_t segv_inode;
++ uid_t segv_uid;
++ __u16 num_sprole_pws;
++ __u16 mode;
++};
++
++struct gr_arg_wrapper {
++ struct gr_arg *arg;
++ __u32 version;
++ __u32 size;
++};
++
++struct subject_map {
++ struct acl_subject_label *user;
++ struct acl_subject_label *kernel;
++ struct subject_map *prev;
++ struct subject_map *next;
++};
++
++struct acl_subj_map_db {
++ struct subject_map **s_hash;
++ __u32 s_size;
++};
++
++/* End Data Structures Section */
++
++/* Hash functions generated by empirical testing by Brad Spengler
++ Makes good use of the low bits of the inode. Generally 0-1 times
++ in loop for successful match. 0-3 for unsuccessful match.
++   Shift/add algorithm with modulus of table size and an XOR */
++
++static __inline__ unsigned int
++rhash(const uid_t uid, const __u16 type, const unsigned int sz)
++{
++ return ((((uid + type) << (16 + type)) ^ uid) % sz);
++}
++
++static __inline__ unsigned int
++shash(const struct acl_subject_label *userp, const unsigned int sz)
++{
++ return ((const unsigned long)userp % sz);
++}
++
++static __inline__ unsigned int
++fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
++{
++ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
++}
++
++static __inline__ unsigned int
++nhash(const char *name, const __u16 len, const unsigned int sz)
++{
++ return full_name_hash((const unsigned char *)name, len) % sz;
++}
++
++#define FOR_EACH_ROLE_START(role) \
++ role = role_list; \
++ while (role) {
++
++#define FOR_EACH_ROLE_END(role) \
++ role = role->prev; \
++ }
++
++#define FOR_EACH_SUBJECT_START(role,subj,iter) \
++ subj = NULL; \
++ iter = 0; \
++ while (iter < role->subj_hash_size) { \
++ if (subj == NULL) \
++ subj = role->subj_hash[iter]; \
++ if (subj == NULL) { \
++ iter++; \
++ continue; \
++ }
++
++#define FOR_EACH_SUBJECT_END(subj,iter) \
++ subj = subj->next; \
++ if (subj == NULL) \
++ iter++; \
++ }
++
++
++#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
++ subj = role->hash->first; \
++ while (subj != NULL) {
++
++#define FOR_EACH_NESTED_SUBJECT_END(subj) \
++ subj = subj->next; \
++ }
++
++#endif
++
+diff -urNp linux-2.6.31.7/include/linux/gralloc.h linux-2.6.31.7/include/linux/gralloc.h
+--- linux-2.6.31.7/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/include/linux/gralloc.h 2009-12-08 17:39:44.239805301 -0500
+@@ -0,0 +1,9 @@
++#ifndef __GRALLOC_H
++#define __GRALLOC_H
++
++void acl_free_all(void);
++int acl_alloc_stack_init(unsigned long size);
++void *acl_alloc(unsigned long len);
++void *acl_alloc_num(unsigned long num, unsigned long len);
++
++#endif
+diff -urNp linux-2.6.31.7/include/linux/grdefs.h linux-2.6.31.7/include/linux/grdefs.h
+--- linux-2.6.31.7/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/include/linux/grdefs.h 2009-12-08 17:39:44.240716481 -0500
+@@ -0,0 +1,136 @@
++#ifndef GRDEFS_H
++#define GRDEFS_H
++
++/* Begin grsecurity status declarations */
++
++enum {
++ GR_READY = 0x01,
++ GR_STATUS_INIT = 0x00 // disabled state
++};
++
++/* Begin ACL declarations */
++
++/* Role flags */
++
++enum {
++ GR_ROLE_USER = 0x0001,
++ GR_ROLE_GROUP = 0x0002,
++ GR_ROLE_DEFAULT = 0x0004,
++ GR_ROLE_SPECIAL = 0x0008,
++ GR_ROLE_AUTH = 0x0010,
++ GR_ROLE_NOPW = 0x0020,
++ GR_ROLE_GOD = 0x0040,
++ GR_ROLE_LEARN = 0x0080,
++ GR_ROLE_TPE = 0x0100,
++ GR_ROLE_DOMAIN = 0x0200,
++ GR_ROLE_PAM = 0x0400
++};
++
++/* ACL Subject and Object mode flags */
++enum {
++ GR_DELETED = 0x80000000
++};
++
++/* ACL Object-only mode flags */
++enum {
++ GR_READ = 0x00000001,
++ GR_APPEND = 0x00000002,
++ GR_WRITE = 0x00000004,
++ GR_EXEC = 0x00000008,
++ GR_FIND = 0x00000010,
++ GR_INHERIT = 0x00000020,
++ GR_SETID = 0x00000040,
++ GR_CREATE = 0x00000080,
++ GR_DELETE = 0x00000100,
++ GR_LINK = 0x00000200,
++ GR_AUDIT_READ = 0x00000400,
++ GR_AUDIT_APPEND = 0x00000800,
++ GR_AUDIT_WRITE = 0x00001000,
++ GR_AUDIT_EXEC = 0x00002000,
++ GR_AUDIT_FIND = 0x00004000,
++ GR_AUDIT_INHERIT= 0x00008000,
++ GR_AUDIT_SETID = 0x00010000,
++ GR_AUDIT_CREATE = 0x00020000,
++ GR_AUDIT_DELETE = 0x00040000,
++ GR_AUDIT_LINK = 0x00080000,
++ GR_PTRACERD = 0x00100000,
++ GR_NOPTRACE = 0x00200000,
++ GR_SUPPRESS = 0x00400000,
++ GR_NOLEARN = 0x00800000
++};
++
++#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
++ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
++ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
++
++/* ACL subject-only mode flags */
++enum {
++ GR_KILL = 0x00000001,
++ GR_VIEW = 0x00000002,
++ GR_PROTECTED = 0x00000004,
++ GR_LEARN = 0x00000008,
++ GR_OVERRIDE = 0x00000010,
++ /* just a placeholder, this mode is only used in userspace */
++ GR_DUMMY = 0x00000020,
++ GR_PROTSHM = 0x00000040,
++ GR_KILLPROC = 0x00000080,
++ GR_KILLIPPROC = 0x00000100,
++ /* just a placeholder, this mode is only used in userspace */
++ GR_NOTROJAN = 0x00000200,
++ GR_PROTPROCFD = 0x00000400,
++ GR_PROCACCT = 0x00000800,
++ GR_RELAXPTRACE = 0x00001000,
++ GR_NESTED = 0x00002000,
++ GR_INHERITLEARN = 0x00004000,
++ GR_PROCFIND = 0x00008000,
++ GR_POVERRIDE = 0x00010000,
++ GR_KERNELAUTH = 0x00020000,
++};
++
++enum {
++ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
++ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
++ GR_PAX_ENABLE_MPROTECT = 0x0004,
++ GR_PAX_ENABLE_RANDMMAP = 0x0008,
++ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
++ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
++ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
++ GR_PAX_DISABLE_MPROTECT = 0x0400,
++ GR_PAX_DISABLE_RANDMMAP = 0x0800,
++ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
++};
++
++enum {
++ GR_ID_USER = 0x01,
++ GR_ID_GROUP = 0x02,
++};
++
++enum {
++ GR_ID_ALLOW = 0x01,
++ GR_ID_DENY = 0x02,
++};
++
++#define GR_CRASH_RES 31
++#define GR_UIDTABLE_MAX 500
++
++/* begin resource learning section */
++enum {
++ GR_RLIM_CPU_BUMP = 60,
++ GR_RLIM_FSIZE_BUMP = 50000,
++ GR_RLIM_DATA_BUMP = 10000,
++ GR_RLIM_STACK_BUMP = 1000,
++ GR_RLIM_CORE_BUMP = 10000,
++ GR_RLIM_RSS_BUMP = 500000,
++ GR_RLIM_NPROC_BUMP = 1,
++ GR_RLIM_NOFILE_BUMP = 5,
++ GR_RLIM_MEMLOCK_BUMP = 50000,
++ GR_RLIM_AS_BUMP = 500000,
++ GR_RLIM_LOCKS_BUMP = 2,
++ GR_RLIM_SIGPENDING_BUMP = 5,
++ GR_RLIM_MSGQUEUE_BUMP = 10000,
++ GR_RLIM_NICE_BUMP = 1,
++ GR_RLIM_RTPRIO_BUMP = 1,
++ GR_RLIM_RTTIME_BUMP = 1000000
++};
++
++#endif
+diff -urNp linux-2.6.31.7/include/linux/grinternal.h linux-2.6.31.7/include/linux/grinternal.h
+--- linux-2.6.31.7/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/include/linux/grinternal.h 2009-12-08 17:39:44.240716481 -0500
+@@ -0,0 +1,212 @@
++#ifndef __GRINTERNAL_H
++#define __GRINTERNAL_H
++
++#ifdef CONFIG_GRKERNSEC
++
++#include <linux/fs.h>
++#include <linux/mnt_namespace.h>
++#include <linux/nsproxy.h>
++#include <linux/gracl.h>
++#include <linux/grdefs.h>
++#include <linux/grmsg.h>
++
++void gr_add_learn_entry(const char *fmt, ...)
++ __attribute__ ((format (printf, 1, 2)));
++__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
++ const struct vfsmount *mnt);
++__u32 gr_check_create(const struct dentry *new_dentry,
++ const struct dentry *parent,
++ const struct vfsmount *mnt, const __u32 mode);
++int gr_check_protected_task(const struct task_struct *task);
++__u32 to_gr_audit(const __u32 reqmode);
++int gr_set_acls(const int type);
++
++int gr_acl_is_enabled(void);
++char gr_roletype_to_char(void);
++
++void gr_handle_alertkill(struct task_struct *task);
++char *gr_to_filename(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++char *gr_to_filename1(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++char *gr_to_filename2(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++char *gr_to_filename3(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++
++extern int grsec_enable_harden_ptrace;
++extern int grsec_enable_link;
++extern int grsec_enable_fifo;
++extern int grsec_enable_execve;
++extern int grsec_enable_shm;
++extern int grsec_enable_execlog;
++extern int grsec_enable_signal;
++extern int grsec_enable_forkfail;
++extern int grsec_enable_time;
++extern int grsec_enable_rofs;
++extern int grsec_enable_chroot_shmat;
++extern int grsec_enable_chroot_findtask;
++extern int grsec_enable_chroot_mount;
++extern int grsec_enable_chroot_double;
++extern int grsec_enable_chroot_pivot;
++extern int grsec_enable_chroot_chdir;
++extern int grsec_enable_chroot_chmod;
++extern int grsec_enable_chroot_mknod;
++extern int grsec_enable_chroot_fchdir;
++extern int grsec_enable_chroot_nice;
++extern int grsec_enable_chroot_execlog;
++extern int grsec_enable_chroot_caps;
++extern int grsec_enable_chroot_sysctl;
++extern int grsec_enable_chroot_unix;
++extern int grsec_enable_tpe;
++extern int grsec_tpe_gid;
++extern int grsec_enable_tpe_all;
++extern int grsec_enable_sidcaps;
++extern int grsec_enable_socket_all;
++extern int grsec_socket_all_gid;
++extern int grsec_enable_socket_client;
++extern int grsec_socket_client_gid;
++extern int grsec_enable_socket_server;
++extern int grsec_socket_server_gid;
++extern int grsec_audit_gid;
++extern int grsec_enable_group;
++extern int grsec_enable_audit_textrel;
++extern int grsec_enable_mount;
++extern int grsec_enable_chdir;
++extern int grsec_resource_logging;
++extern int grsec_lock;
++
++extern spinlock_t grsec_alert_lock;
++extern unsigned long grsec_alert_wtime;
++extern unsigned long grsec_alert_fyet;
++
++extern spinlock_t grsec_audit_lock;
++
++extern rwlock_t grsec_exec_file_lock;
++
++#define gr_task_fullpath(tsk) (tsk->exec_file ? \
++ gr_to_filename2(tsk->exec_file->f_path.dentry, \
++ tsk->exec_file->f_vfsmnt) : "/")
++
++#define gr_parent_task_fullpath(tsk) (tsk->parent->exec_file ? \
++ gr_to_filename3(tsk->parent->exec_file->f_path.dentry, \
++ tsk->parent->exec_file->f_vfsmnt) : "/")
++
++#define gr_task_fullpath0(tsk) (tsk->exec_file ? \
++ gr_to_filename(tsk->exec_file->f_path.dentry, \
++ tsk->exec_file->f_vfsmnt) : "/")
++
++#define gr_parent_task_fullpath0(tsk) (tsk->parent->exec_file ? \
++ gr_to_filename1(tsk->parent->exec_file->f_path.dentry, \
++ tsk->parent->exec_file->f_vfsmnt) : "/")
++
++#define proc_is_chrooted(tsk_a) ((tsk_a->pid > 1) && (tsk_a->fs != NULL) && \
++ ((init_task.fs->root.dentry != tsk_a->fs->root.dentry) && \
++ (tsk_a->nsproxy->mnt_ns->root->mnt_root != \
++ tsk_a->fs->root.dentry)))
++
++#define have_same_root(tsk_a,tsk_b) ((tsk_a->fs != NULL) && (tsk_b->fs != NULL) && \
++ (tsk_a->fs->root.dentry == tsk_b->fs->root.dentry))
++
++#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), task->comm, \
++ task->pid, cred->uid, \
++ cred->euid, cred->gid, cred->egid, \
++ gr_parent_task_fullpath(task), \
++ task->parent->comm, task->parent->pid, \
++ pcred->uid, pcred->euid, \
++ pcred->gid, pcred->egid
++
++#define GR_CHROOT_CAPS {{ \
++ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
++ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
++ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
++ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
++ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
++ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
++
++#define security_learn(normal_msg,args...) \
++({ \
++ read_lock(&grsec_exec_file_lock); \
++ gr_add_learn_entry(normal_msg "\n", ## args); \
++ read_unlock(&grsec_exec_file_lock); \
++})
++
++enum {
++ GR_DO_AUDIT,
++ GR_DONT_AUDIT,
++ GR_DONT_AUDIT_GOOD
++};
++
++enum {
++ GR_TTYSNIFF,
++ GR_RBAC,
++ GR_RBAC_STR,
++ GR_STR_RBAC,
++ GR_RBAC_MODE2,
++ GR_RBAC_MODE3,
++ GR_FILENAME,
++ GR_SYSCTL_HIDDEN,
++ GR_NOARGS,
++ GR_ONE_INT,
++ GR_ONE_INT_TWO_STR,
++ GR_ONE_STR,
++ GR_STR_INT,
++ GR_TWO_INT,
++ GR_THREE_INT,
++ GR_FIVE_INT_TWO_STR,
++ GR_TWO_STR,
++ GR_THREE_STR,
++ GR_FOUR_STR,
++ GR_STR_FILENAME,
++ GR_FILENAME_STR,
++ GR_FILENAME_TWO_INT,
++ GR_FILENAME_TWO_INT_STR,
++ GR_TEXTREL,
++ GR_PTRACE,
++ GR_RESOURCE,
++ GR_CAP,
++ GR_SIG,
++ GR_SIG2,
++ GR_CRASH1,
++ GR_CRASH2,
++ GR_PSACCT
++};
++
++#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
++#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
++#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
++#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
++#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
++#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
++#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
++#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
++#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
++#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
++#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
++#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
++#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
++#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
++#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
++#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
++#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
++#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
++#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
++#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
++#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
++#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
++#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
++#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
++#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
++#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
++#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
++#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
++#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
++#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
++#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
++#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
++
++void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
++
++#endif
++
++#endif
+diff -urNp linux-2.6.31.7/include/linux/grmsg.h linux-2.6.31.7/include/linux/grmsg.h
+--- linux-2.6.31.7/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/include/linux/grmsg.h 2009-12-08 17:39:44.240716481 -0500
+@@ -0,0 +1,107 @@
++#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
++#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
++#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
++#define GR_STOPMOD_MSG "denied modification of module state by "
++#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
++#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
++#define GR_IOPERM_MSG "denied use of ioperm() by "
++#define GR_IOPL_MSG "denied use of iopl() by "
++#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
++#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
++#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
++#define GR_KMEM_MSG "denied write of /dev/kmem by "
++#define GR_PORT_OPEN_MSG "denied open of /dev/port by "
++#define GR_MEM_WRITE_MSG "denied write of /dev/mem by "
++#define GR_MEM_MMAP_MSG "denied mmap write of /dev/[k]mem by "
++#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
++#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%u.%u.%u.%u"
++#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%u.%u.%u.%u"
++#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
++#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
++#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
++#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
++#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
++#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
++#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
++#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%u.%u.%u.%u %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
++#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
++#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
++#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
++#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
++#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
++#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
++#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
++#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
++#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
++#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
++#define GR_NPROC_MSG "denied overstep of process limit by "
++#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
++#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
++#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
++#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
++#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
++#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
++#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
++#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
++#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
++#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
++#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
++#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
++#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
++#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
++#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
++#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
++#define GR_INITF_ACL_MSG "init_variables() failed %s by "
++#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
++#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
++#define GR_SHUTS_ACL_MSG "shutdown auth success for "
++#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
++#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
++#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
++#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
++#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
++#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
++#define GR_ENABLEF_ACL_MSG "unable to load %s for "
++#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
++#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
++#define GR_RELOADF_ACL_MSG "failed reload of %s for "
++#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
++#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
++#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
++#define GR_SPROLEF_ACL_MSG "special role %s failure for "
++#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
++#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
++#define GR_UNSPROLEF_ACL_MSG "special role unauth of %s failure for "
++#define GR_INVMODE_ACL_MSG "invalid mode %d by "
++#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
++#define GR_FAILFORK_MSG "failed fork with errno %d by "
++#define GR_NICE_CHROOT_MSG "denied priority change by "
++#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
++#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
++#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
++#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
++#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
++#define GR_TIME_MSG "time set by "
++#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
++#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
++#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
++#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
++#define GR_SOCK2_MSG "denied socket(%d,%.16s,%.16s) by "
++#define GR_BIND_MSG "denied bind() by "
++#define GR_CONNECT_MSG "denied connect() by "
++#define GR_BIND_ACL_MSG "denied bind() to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by "
++#define GR_CONNECT_ACL_MSG "denied connect() to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by "
++#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%u.%u.%u.%u\t%u\t%u\t%u\t%u\t%u.%u.%u.%u"
++#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
++#define GR_CAP_ACL_MSG "use of %s denied for "
++#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
++#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
++#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
++#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
++#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
++#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
++#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
++#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
++#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
++#define GR_NONROOT_MODLOAD_MSG "denied kernel module auto-load of %.64s by "
++#define GR_VM86_MSG "denied use of vm86 by "
+diff -urNp linux-2.6.31.7/include/linux/grsecurity.h linux-2.6.31.7/include/linux/grsecurity.h
+--- linux-2.6.31.7/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/include/linux/grsecurity.h 2009-12-08 17:39:44.240716481 -0500
+@@ -0,0 +1,200 @@
++#ifndef GR_SECURITY_H
++#define GR_SECURITY_H
++#include <linux/fs.h>
++#include <linux/fs_struct.h>
++#include <linux/binfmts.h>
++#include <linux/gracl.h>
++
++/* notify of brain-dead configs */
++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
++#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
++#endif
++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
++#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
++#endif
++#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
++#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
++#endif
++#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
++#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
++#endif
++#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
++#error "CONFIG_PAX enabled, but no PaX options are enabled."
++#endif
++
++void gr_handle_brute_attach(struct task_struct *p);
++void gr_handle_brute_check(void);
++
++char gr_roletype_to_char(void);
++
++int gr_check_user_change(int real, int effective, int fs);
++int gr_check_group_change(int real, int effective, int fs);
++
++void gr_del_task_from_ip_table(struct task_struct *p);
++
++int gr_pid_is_chrooted(struct task_struct *p);
++int gr_handle_chroot_nice(void);
++int gr_handle_chroot_sysctl(const int op);
++int gr_handle_chroot_setpriority(struct task_struct *p,
++ const int niceval);
++int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
++int gr_handle_chroot_chroot(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++int gr_handle_chroot_caps(struct path *path);
++void gr_handle_chroot_chdir(struct path *path);
++int gr_handle_chroot_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode);
++int gr_handle_chroot_mknod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode);
++int gr_handle_chroot_mount(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const char *dev_name);
++int gr_handle_chroot_pivot(void);
++int gr_handle_chroot_unix(const pid_t pid);
++
++int gr_handle_rawio(const struct inode *inode);
++int gr_handle_nproc(void);
++
++void gr_handle_ioperm(void);
++void gr_handle_iopl(void);
++
++int gr_tpe_allow(const struct file *file);
++
++int gr_random_pid(void);
++
++void gr_log_forkfail(const int retval);
++void gr_log_timechange(void);
++void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
++void gr_log_chdir(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_log_chroot_exec(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_handle_exec_args(struct linux_binprm *bprm, char **argv);
++void gr_log_remount(const char *devname, const int retval);
++void gr_log_unmount(const char *devname, const int retval);
++void gr_log_mount(const char *from, const char *to, const int retval);
++void gr_log_textrel(struct vm_area_struct *vma);
++
++int gr_handle_follow_link(const struct inode *parent,
++ const struct inode *inode,
++ const struct dentry *dentry,
++ const struct vfsmount *mnt);
++int gr_handle_fifo(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const struct dentry *dir, const int flag,
++ const int acc_mode);
++int gr_handle_hardlink(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ struct inode *inode,
++ const int mode, const char *to);
++
++int gr_is_capable(const int cap);
++int gr_is_capable_nolog(const int cap);
++void gr_learn_resource(const struct task_struct *task, const int limit,
++ const unsigned long wanted, const int gt);
++void gr_copy_label(struct task_struct *tsk);
++void gr_handle_crash(struct task_struct *task, const int sig);
++int gr_handle_signal(const struct task_struct *p, const int sig);
++int gr_check_crash_uid(const uid_t uid);
++int gr_check_protected_task(const struct task_struct *task);
++int gr_acl_handle_mmap(const struct file *file,
++ const unsigned long prot);
++int gr_acl_handle_mprotect(const struct file *file,
++ const unsigned long prot);
++int gr_check_hidden_task(const struct task_struct *tsk);
++__u32 gr_acl_handle_truncate(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_utime(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_access(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int fmode);
++__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, mode_t mode);
++__u32 gr_acl_handle_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, mode_t mode);
++__u32 gr_acl_handle_chown(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++int gr_handle_ptrace(struct task_struct *task, const long request);
++int gr_handle_proc_ptrace(struct task_struct *task);
++__u32 gr_acl_handle_execve(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++int gr_check_crash_exec(const struct file *filp);
++int gr_acl_is_enabled(void);
++void gr_set_kernel_label(struct task_struct *task);
++void gr_set_role_label(struct task_struct *task, const uid_t uid,
++ const gid_t gid);
++int gr_set_proc_label(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const int unsafe_share);
++__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_open(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int fmode);
++__u32 gr_acl_handle_creat(const struct dentry *dentry,
++ const struct dentry *p_dentry,
++ const struct vfsmount *p_mnt, const int fmode,
++ const int imode);
++void gr_handle_create(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const int mode);
++__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt);
++__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_handle_delete(const ino_t ino, const dev_t dev);
++__u32 gr_acl_handle_unlink(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const char *from);
++__u32 gr_acl_handle_link(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct vfsmount *old_mnt, const char *to);
++int gr_acl_handle_rename(struct dentry *new_dentry,
++ struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ struct dentry *old_dentry,
++ struct inode *old_parent_inode,
++ struct vfsmount *old_mnt, const char *newname);
++void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace);
++__u32 gr_check_link(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct vfsmount *old_mnt);
++int gr_acl_handle_filldir(const struct file *file, const char *name,
++ const unsigned int namelen, const ino_t ino);
++
++__u32 gr_acl_handle_unix(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_acl_handle_exit(void);
++void gr_acl_handle_psacct(struct task_struct *task, const long code);
++int gr_acl_handle_procpidmem(const struct task_struct *task);
++int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
++int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
++
++#ifdef CONFIG_GRKERNSEC
++void gr_log_nonroot_mod_load(const char *modname);
++void gr_handle_vm86(void);
++void gr_handle_mem_write(void);
++void gr_handle_kmem_write(void);
++void gr_handle_open_port(void);
++int gr_handle_mem_mmap(const unsigned long offset,
++ struct vm_area_struct *vma);
++
++extern int grsec_enable_dmesg;
++extern int grsec_enable_randsrc;
++extern int grsec_enable_shm;
++#endif
++
++#endif
+diff -urNp linux-2.6.31.7/include/linux/hdpu_features.h linux-2.6.31.7/include/linux/hdpu_features.h
+--- linux-2.6.31.7/include/linux/hdpu_features.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/hdpu_features.h 2009-12-08 17:39:44.241676868 -0500
+@@ -3,7 +3,7 @@
+ struct cpustate_t {
+ spinlock_t lock;
+ int excl;
+- int open_count;
++ atomic_t open_count;
+ unsigned char cached_val;
+ int inited;
+ unsigned long *set_addr;
+diff -urNp linux-2.6.31.7/include/linux/highmem.h linux-2.6.31.7/include/linux/highmem.h
+--- linux-2.6.31.7/include/linux/highmem.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/highmem.h 2009-12-08 17:39:44.241676868 -0500
+@@ -137,6 +137,18 @@ static inline void clear_highpage(struct
+ kunmap_atomic(kaddr, KM_USER0);
+ }
+
++static inline void sanitize_highpage(struct page *page)
++{
++ void *kaddr;
++ unsigned long flags;
++
++ local_irq_save(flags);
++ kaddr = kmap_atomic(page, KM_CLEARPAGE);
++ clear_page(kaddr);
++ kunmap_atomic(kaddr, KM_CLEARPAGE);
++ local_irq_restore(flags);
++}
++
+ static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
+diff -urNp linux-2.6.31.7/include/linux/hugetlb.h linux-2.6.31.7/include/linux/hugetlb.h
+--- linux-2.6.31.7/include/linux/hugetlb.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/hugetlb.h 2009-12-08 17:39:44.241676868 -0500
+@@ -146,7 +146,7 @@ static inline struct hugetlbfs_sb_info *
+ }
+
+ extern const struct file_operations hugetlbfs_file_operations;
+-extern struct vm_operations_struct hugetlb_vm_ops;
++extern const struct vm_operations_struct hugetlb_vm_ops;
+ struct file *hugetlb_file_setup(const char *name, size_t size, int acct,
+ struct user_struct **user);
+ int hugetlb_get_quota(struct address_space *mapping, long delta);
+diff -urNp linux-2.6.31.7/include/linux/interrupt.h linux-2.6.31.7/include/linux/interrupt.h
+--- linux-2.6.31.7/include/linux/interrupt.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/interrupt.h 2009-12-08 17:39:44.242670977 -0500
+@@ -355,7 +355,7 @@ enum
+ /* map softirq index to softirq name. update 'softirq_to_name' in
+ * kernel/softirq.c when adding a new softirq.
+ */
+-extern char *softirq_to_name[NR_SOFTIRQS];
++extern const char * const softirq_to_name[NR_SOFTIRQS];
+
+ /* softirq mask and active fields moved to irq_cpustat_t in
+ * asm/hardirq.h to get better cache usage. KAO
+diff -urNp linux-2.6.31.7/include/linux/jbd2.h linux-2.6.31.7/include/linux/jbd2.h
+--- linux-2.6.31.7/include/linux/jbd2.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/jbd2.h 2009-12-08 17:39:44.242670977 -0500
+@@ -66,7 +66,7 @@ extern u8 jbd2_journal_enable_debug;
+ } \
+ } while (0)
+ #else
+-#define jbd_debug(f, a...) /**/
++#define jbd_debug(f, a...) do {} while (0)
+ #endif
+
+ static inline void *jbd2_alloc(size_t size, gfp_t flags)
+diff -urNp linux-2.6.31.7/include/linux/jbd.h linux-2.6.31.7/include/linux/jbd.h
+--- linux-2.6.31.7/include/linux/jbd.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/jbd.h 2009-12-08 17:39:44.242670977 -0500
+@@ -66,7 +66,7 @@ extern u8 journal_enable_debug;
+ } \
+ } while (0)
+ #else
+-#define jbd_debug(f, a...) /**/
++#define jbd_debug(f, a...) do {} while (0)
+ #endif
+
+ static inline void *jbd_alloc(size_t size, gfp_t flags)
+diff -urNp linux-2.6.31.7/include/linux/kallsyms.h linux-2.6.31.7/include/linux/kallsyms.h
+--- linux-2.6.31.7/include/linux/kallsyms.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/kallsyms.h 2009-12-08 17:39:44.243804714 -0500
+@@ -15,7 +15,8 @@
+
+ struct module;
+
+-#ifdef CONFIG_KALLSYMS
++#ifndef __INCLUDED_BY_HIDESYM
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ /* Lookup the address for a symbol. Returns 0 if not found. */
+ unsigned long kallsyms_lookup_name(const char *name);
+
+@@ -92,6 +93,9 @@ static inline int lookup_symbol_attrs(un
+ /* Stupid that this does nothing, but I didn't create this mess. */
+ #define __print_symbol(fmt, addr)
+ #endif /*CONFIG_KALLSYMS*/
++#else /* when included by kallsyms.c, with HIDESYM enabled */
++extern void __print_symbol(const char *fmt, unsigned long address);
++#endif
+
+ /* This macro allows us to keep printk typechecking */
+ static void __check_printsym_format(const char *fmt, ...)
+diff -urNp linux-2.6.31.7/include/linux/kgdb.h linux-2.6.31.7/include/linux/kgdb.h
+--- linux-2.6.31.7/include/linux/kgdb.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/kgdb.h 2009-12-08 17:39:44.243804714 -0500
+@@ -259,12 +259,12 @@ struct kgdb_io {
+ void (*post_exception) (void);
+ };
+
+-extern struct kgdb_arch arch_kgdb_ops;
++extern const struct kgdb_arch arch_kgdb_ops;
+
+ extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
+
+-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
+-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
++extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
++extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
+
+ extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
+ extern int kgdb_mem2hex(char *mem, char *buf, int count);
+diff -urNp linux-2.6.31.7/include/linux/kmemcheck.h linux-2.6.31.7/include/linux/kmemcheck.h
+--- linux-2.6.31.7/include/linux/kmemcheck.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/kmemcheck.h 2009-12-08 17:39:44.243804714 -0500
+@@ -137,13 +137,13 @@ static inline void kmemcheck_mark_initia
+ int name##_end[0];
+
+ #define kmemcheck_annotate_bitfield(ptr, name) \
+- do if (ptr) { \
++ if (ptr) { \
+ int _n = (long) &((ptr)->name##_end) \
+ - (long) &((ptr)->name##_begin); \
+ BUILD_BUG_ON(_n < 0); \
+ \
+ kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
+- } while (0)
++ }
+
+ #define kmemcheck_annotate_variable(var) \
+ do { \
+diff -urNp linux-2.6.31.7/include/linux/kobject.h linux-2.6.31.7/include/linux/kobject.h
+--- linux-2.6.31.7/include/linux/kobject.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/kobject.h 2009-12-08 17:39:44.244673293 -0500
+@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
+
+ struct kobj_type {
+ void (*release)(struct kobject *kobj);
+- struct sysfs_ops *sysfs_ops;
++ const struct sysfs_ops *sysfs_ops;
+ struct attribute **default_attrs;
+ };
+
+@@ -118,9 +118,9 @@ struct kobj_uevent_env {
+ };
+
+ struct kset_uevent_ops {
+- int (*filter)(struct kset *kset, struct kobject *kobj);
+- const char *(*name)(struct kset *kset, struct kobject *kobj);
+- int (*uevent)(struct kset *kset, struct kobject *kobj,
++ int (* const filter)(struct kset *kset, struct kobject *kobj);
++ const char *(* const name)(struct kset *kset, struct kobject *kobj);
++ int (* const uevent)(struct kset *kset, struct kobject *kobj,
+ struct kobj_uevent_env *env);
+ };
+
+@@ -132,7 +132,7 @@ struct kobj_attribute {
+ const char *buf, size_t count);
+ };
+
+-extern struct sysfs_ops kobj_sysfs_ops;
++extern const struct sysfs_ops kobj_sysfs_ops;
+
+ /**
+ * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
+@@ -155,14 +155,14 @@ struct kset {
+ struct list_head list;
+ spinlock_t list_lock;
+ struct kobject kobj;
+- struct kset_uevent_ops *uevent_ops;
++ const struct kset_uevent_ops *uevent_ops;
+ };
+
+ extern void kset_init(struct kset *kset);
+ extern int __must_check kset_register(struct kset *kset);
+ extern void kset_unregister(struct kset *kset);
+ extern struct kset * __must_check kset_create_and_add(const char *name,
+- struct kset_uevent_ops *u,
++ const struct kset_uevent_ops *u,
+ struct kobject *parent_kobj);
+
+ static inline struct kset *to_kset(struct kobject *kobj)
+diff -urNp linux-2.6.31.7/include/linux/kvm_host.h linux-2.6.31.7/include/linux/kvm_host.h
+--- linux-2.6.31.7/include/linux/kvm_host.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/kvm_host.h 2009-12-08 17:39:44.244673293 -0500
+@@ -173,7 +173,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
+ void vcpu_load(struct kvm_vcpu *vcpu);
+ void vcpu_put(struct kvm_vcpu *vcpu);
+
+-int kvm_init(void *opaque, unsigned int vcpu_size,
++int kvm_init(const void *opaque, unsigned int vcpu_size,
+ struct module *module);
+ void kvm_exit(void);
+
+@@ -280,7 +280,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
+ struct kvm_guest_debug *dbg);
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+
+-int kvm_arch_init(void *opaque);
++int kvm_arch_init(const void *opaque);
+ void kvm_arch_exit(void);
+
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+diff -urNp linux-2.6.31.7/include/linux/libata.h linux-2.6.31.7/include/linux/libata.h
+--- linux-2.6.31.7/include/linux/libata.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/libata.h 2009-12-08 17:39:44.245754856 -0500
+@@ -64,11 +64,11 @@
+ #ifdef ATA_VERBOSE_DEBUG
+ #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
+ #else
+-#define VPRINTK(fmt, args...)
++#define VPRINTK(fmt, args...) do {} while (0)
+ #endif /* ATA_VERBOSE_DEBUG */
+ #else
+-#define DPRINTK(fmt, args...)
+-#define VPRINTK(fmt, args...)
++#define DPRINTK(fmt, args...) do {} while (0)
++#define VPRINTK(fmt, args...) do {} while (0)
+ #endif /* ATA_DEBUG */
+
+ #define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args)
+@@ -460,10 +460,10 @@ struct ata_queued_cmd;
+
+ /* typedefs */
+ typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
+-typedef int (*ata_prereset_fn_t)(struct ata_link *link, unsigned long deadline);
+-typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes,
++typedef int (* const ata_prereset_fn_t)(struct ata_link *link, unsigned long deadline);
++typedef int (* ata_reset_fn_t)(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline);
+-typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes);
++typedef void (* const ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes);
+
+ /*
+ * host pm policy: If you alter this, you also need to alter libata-scsi.c
+@@ -509,11 +509,11 @@ struct ata_ioports {
+
+ struct ata_host {
+ spinlock_t lock;
+- struct device *dev;
++ struct device *dev;
+ void __iomem * const *iomap;
+ unsigned int n_ports;
+ void *private_data;
+- struct ata_port_operations *ops;
++ const struct ata_port_operations *ops;
+ unsigned long flags;
+ #ifdef CONFIG_ATA_ACPI
+ acpi_handle acpi_handle;
+@@ -693,7 +693,7 @@ struct ata_link {
+
+ struct ata_port {
+ struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
+- struct ata_port_operations *ops;
++ const struct ata_port_operations *ops;
+ spinlock_t *lock;
+ /* Flags owned by the EH context. Only EH should touch these once the
+ port is active */
+@@ -775,26 +775,26 @@ struct ata_port_operations {
+ /*
+ * Command execution
+ */
+- int (*qc_defer)(struct ata_queued_cmd *qc);
+- int (*check_atapi_dma)(struct ata_queued_cmd *qc);
+- void (*qc_prep)(struct ata_queued_cmd *qc);
+- unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
+- bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
++ int (* const qc_defer)(struct ata_queued_cmd *qc);
++ int (* const check_atapi_dma)(struct ata_queued_cmd *qc);
++ void (* const qc_prep)(struct ata_queued_cmd *qc);
++ unsigned int (* const qc_issue)(struct ata_queued_cmd *qc);
++ bool (* const qc_fill_rtf)(struct ata_queued_cmd *qc);
+
+ /*
+ * Configuration and exception handling
+ */
+- int (*cable_detect)(struct ata_port *ap);
+- unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask);
+- void (*set_piomode)(struct ata_port *ap, struct ata_device *dev);
+- void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev);
+- int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev);
+- unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id);
++ int (* const cable_detect)(struct ata_port *ap);
++ unsigned long (* const mode_filter)(struct ata_device *dev, unsigned long xfer_mask);
++ void (* const set_piomode)(struct ata_port *ap, struct ata_device *dev);
++ void (* const set_dmamode)(struct ata_port *ap, struct ata_device *dev);
++ int (* const set_mode)(struct ata_link *link, struct ata_device **r_failed_dev);
++ unsigned int (* const read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id);
+
+- void (*dev_config)(struct ata_device *dev);
++ void (* const dev_config)(struct ata_device *dev);
+
+- void (*freeze)(struct ata_port *ap);
+- void (*thaw)(struct ata_port *ap);
++ void (* const freeze)(struct ata_port *ap);
++ void (* const thaw)(struct ata_port *ap);
+ ata_prereset_fn_t prereset;
+ ata_reset_fn_t softreset;
+ ata_reset_fn_t hardreset;
+@@ -803,64 +803,64 @@ struct ata_port_operations {
+ ata_reset_fn_t pmp_softreset;
+ ata_reset_fn_t pmp_hardreset;
+ ata_postreset_fn_t pmp_postreset;
+- void (*error_handler)(struct ata_port *ap);
+- void (*lost_interrupt)(struct ata_port *ap);
+- void (*post_internal_cmd)(struct ata_queued_cmd *qc);
++ void (* const error_handler)(struct ata_port *ap);
++ void (* const lost_interrupt)(struct ata_port *ap);
++ void (* const post_internal_cmd)(struct ata_queued_cmd *qc);
+
+ /*
+ * Optional features
+ */
+- int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val);
+- int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val);
+- void (*pmp_attach)(struct ata_port *ap);
+- void (*pmp_detach)(struct ata_port *ap);
+- int (*enable_pm)(struct ata_port *ap, enum link_pm policy);
+- void (*disable_pm)(struct ata_port *ap);
++ int (* const scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val);
++ int (* const scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val);
++ void (* const pmp_attach)(struct ata_port *ap);
++ void (* const pmp_detach)(struct ata_port *ap);
++ int (* const enable_pm)(struct ata_port *ap, enum link_pm policy);
++ void (* const disable_pm)(struct ata_port *ap);
+
+ /*
+ * Start, stop, suspend and resume
+ */
+- int (*port_suspend)(struct ata_port *ap, pm_message_t mesg);
+- int (*port_resume)(struct ata_port *ap);
+- int (*port_start)(struct ata_port *ap);
+- void (*port_stop)(struct ata_port *ap);
+- void (*host_stop)(struct ata_host *host);
++ int (* const port_suspend)(struct ata_port *ap, pm_message_t mesg);
++ int (* const port_resume)(struct ata_port *ap);
++ int (* const port_start)(struct ata_port *ap);
++ void (* const port_stop)(struct ata_port *ap);
++ void (* const host_stop)(struct ata_host *host);
+
+ #ifdef CONFIG_ATA_SFF
+ /*
+ * SFF / taskfile oriented ops
+ */
+- void (*sff_dev_select)(struct ata_port *ap, unsigned int device);
+- u8 (*sff_check_status)(struct ata_port *ap);
+- u8 (*sff_check_altstatus)(struct ata_port *ap);
+- void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf);
+- void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf);
+- void (*sff_exec_command)(struct ata_port *ap,
++ void (* const sff_dev_select)(struct ata_port *ap, unsigned int device);
++ u8 (* sff_check_status)(struct ata_port *ap);
++ u8 (* const sff_check_altstatus)(struct ata_port *ap);
++ void (* sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf);
++ void (* sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf);
++ void (* sff_exec_command)(struct ata_port *ap,
+ const struct ata_taskfile *tf);
+- unsigned int (*sff_data_xfer)(struct ata_device *dev,
++ unsigned int (* sff_data_xfer)(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw);
+- u8 (*sff_irq_on)(struct ata_port *);
+- void (*sff_irq_clear)(struct ata_port *);
++ u8 (* const sff_irq_on)(struct ata_port *);
++ void (* const sff_irq_clear)(struct ata_port *);
+
+- void (*bmdma_setup)(struct ata_queued_cmd *qc);
+- void (*bmdma_start)(struct ata_queued_cmd *qc);
+- void (*bmdma_stop)(struct ata_queued_cmd *qc);
+- u8 (*bmdma_status)(struct ata_port *ap);
++ void (* const bmdma_setup)(struct ata_queued_cmd *qc);
++ void (* const bmdma_start)(struct ata_queued_cmd *qc);
++ void (* const bmdma_stop)(struct ata_queued_cmd *qc);
++ u8 (* const bmdma_status)(struct ata_port *ap);
+
+- void (*drain_fifo)(struct ata_queued_cmd *qc);
++ void (* const drain_fifo)(struct ata_queued_cmd *qc);
+ #endif /* CONFIG_ATA_SFF */
+
+- ssize_t (*em_show)(struct ata_port *ap, char *buf);
+- ssize_t (*em_store)(struct ata_port *ap, const char *message,
++ ssize_t (* const em_show)(struct ata_port *ap, char *buf);
++ ssize_t (* const em_store)(struct ata_port *ap, const char *message,
+ size_t size);
+- ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf);
+- ssize_t (*sw_activity_store)(struct ata_device *dev,
++ ssize_t (* const sw_activity_show)(struct ata_device *dev, char *buf);
++ ssize_t (* const sw_activity_store)(struct ata_device *dev,
+ enum sw_activity val);
+ /*
+ * Obsolete
+ */
+- void (*phy_reset)(struct ata_port *ap);
+- void (*eng_timeout)(struct ata_port *ap);
++ void (* const phy_reset)(struct ata_port *ap);
++ void (* const eng_timeout)(struct ata_port *ap);
+
+ /*
+ * ->inherits must be the last field and all the preceding
+@@ -875,7 +875,7 @@ struct ata_port_info {
+ unsigned long pio_mask;
+ unsigned long mwdma_mask;
+ unsigned long udma_mask;
+- struct ata_port_operations *port_ops;
++ const struct ata_port_operations *port_ops;
+ void *private_data;
+ };
+
+@@ -899,7 +899,7 @@ extern const unsigned long sata_deb_timi
+ extern const unsigned long sata_deb_timing_hotplug[];
+ extern const unsigned long sata_deb_timing_long[];
+
+-extern struct ata_port_operations ata_dummy_port_ops;
++extern const struct ata_port_operations ata_dummy_port_ops;
+ extern const struct ata_port_info ata_dummy_port_info;
+
+ static inline const unsigned long *
+@@ -945,7 +945,7 @@ extern int ata_host_activate(struct ata_
+ struct scsi_host_template *sht);
+ extern void ata_host_detach(struct ata_host *host);
+ extern void ata_host_init(struct ata_host *, struct device *,
+- unsigned long, struct ata_port_operations *);
++ unsigned long, const struct ata_port_operations *);
+ extern int ata_scsi_detect(struct scsi_host_template *sht);
+ extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+ extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
+diff -urNp linux-2.6.31.7/include/linux/lockd/bind.h linux-2.6.31.7/include/linux/lockd/bind.h
+--- linux-2.6.31.7/include/linux/lockd/bind.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/lockd/bind.h 2009-12-08 17:39:44.245754856 -0500
+@@ -29,7 +29,7 @@ struct nlmsvc_binding {
+ void (*fclose)(struct file *);
+ };
+
+-extern struct nlmsvc_binding * nlmsvc_ops;
++extern const struct nlmsvc_binding * nlmsvc_ops;
+
+ /*
+ * Similar to nfs_client_initdata, but without the NFS-specific
+diff -urNp linux-2.6.31.7/include/linux/lockd/lockd.h linux-2.6.31.7/include/linux/lockd/lockd.h
+--- linux-2.6.31.7/include/linux/lockd/lockd.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/lockd/lockd.h 2009-12-08 17:39:44.245754856 -0500
+@@ -395,7 +395,7 @@ static inline int nlm_compare_locks(cons
+ &&(fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK);
+ }
+
+-extern struct lock_manager_operations nlmsvc_lock_operations;
++extern const struct lock_manager_operations nlmsvc_lock_operations;
+
+ #endif /* __KERNEL__ */
+
+diff -urNp linux-2.6.31.7/include/linux/mm.h linux-2.6.31.7/include/linux/mm.h
+--- linux-2.6.31.7/include/linux/mm.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/mm.h 2009-12-08 17:39:44.246808591 -0500
+@@ -104,6 +104,10 @@ extern unsigned int kobjsize(const void
+ #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
+ #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#define VM_PAGEEXEC 0x80000000 /* vma->vm_page_prot needs special handling */
++#endif
++
+ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
+ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+ #endif
+@@ -871,6 +875,8 @@ struct shrinker {
+ extern void register_shrinker(struct shrinker *);
+ extern void unregister_shrinker(struct shrinker *);
+
++pgprot_t vm_get_page_prot(unsigned long vm_flags);
++
+ int vma_wants_writenotify(struct vm_area_struct *vma);
+
+ extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
+@@ -1141,6 +1147,7 @@ out:
+ }
+
+ extern int do_munmap(struct mm_struct *, unsigned long, size_t);
++extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
+
+ extern unsigned long do_brk(unsigned long, unsigned long);
+
+@@ -1195,6 +1202,10 @@ extern struct vm_area_struct * find_vma(
+ extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+ struct vm_area_struct **pprev);
+
++extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
++extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
++extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
++
+ /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+@@ -1211,7 +1222,6 @@ static inline unsigned long vma_pages(st
+ return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ }
+
+-pgprot_t vm_get_page_prot(unsigned long vm_flags);
+ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t);
+@@ -1303,5 +1313,12 @@ void vmemmap_populate_print_last(void);
+ extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+ size_t size);
+ extern void refund_locked_memory(struct mm_struct *mm, size_t size);
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
++#else
++static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_MM_H */
+diff -urNp linux-2.6.31.7/include/linux/mm_types.h linux-2.6.31.7/include/linux/mm_types.h
+--- linux-2.6.31.7/include/linux/mm_types.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/mm_types.h 2009-12-08 17:39:44.246808591 -0500
+@@ -171,7 +171,7 @@ struct vm_area_struct {
+ struct anon_vma *anon_vma; /* Serialized by page_table_lock */
+
+ /* Function pointers to deal with this struct. */
+- struct vm_operations_struct * vm_ops;
++ const struct vm_operations_struct * vm_ops;
+
+ /* Information about our backing store: */
+ unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
+@@ -186,6 +186,8 @@ struct vm_area_struct {
+ #ifdef CONFIG_NUMA
+ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
+ #endif
++
++ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
+ };
+
+ struct core_thread {
+@@ -286,6 +288,24 @@ struct mm_struct {
+ #ifdef CONFIG_MMU_NOTIFIER
+ struct mmu_notifier_mm *mmu_notifier_mm;
+ #endif
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ unsigned long pax_flags;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ unsigned long call_dl_resolve;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ unsigned long call_syscall;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ unsigned long delta_mmap; /* randomized offset */
++ unsigned long delta_stack; /* randomized offset */
++#endif
++
+ };
+
+ /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
+diff -urNp linux-2.6.31.7/include/linux/mmu_notifier.h linux-2.6.31.7/include/linux/mmu_notifier.h
+--- linux-2.6.31.7/include/linux/mmu_notifier.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/mmu_notifier.h 2009-12-08 17:39:44.246808591 -0500
+@@ -217,12 +217,12 @@ static inline void mmu_notifier_mm_destr
+ */
+ #define ptep_clear_flush_notify(__vma, __address, __ptep) \
+ ({ \
+- pte_t __pte; \
++ pte_t ___pte; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
++ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
+ mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
+- __pte; \
++ ___pte; \
+ })
+
+ #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
+diff -urNp linux-2.6.31.7/include/linux/mod_devicetable.h linux-2.6.31.7/include/linux/mod_devicetable.h
+--- linux-2.6.31.7/include/linux/mod_devicetable.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/mod_devicetable.h 2009-12-08 17:39:44.247720843 -0500
+@@ -12,7 +12,7 @@
+ typedef unsigned long kernel_ulong_t;
+ #endif
+
+-#define PCI_ANY_ID (~0)
++#define PCI_ANY_ID ((__u16)~0)
+
+ struct pci_device_id {
+ __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
+@@ -131,7 +131,7 @@ struct usb_device_id {
+ #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
+ #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
+
+-#define HID_ANY_ID (~0)
++#define HID_ANY_ID (~0U)
+
+ struct hid_device_id {
+ __u16 bus;
+diff -urNp linux-2.6.31.7/include/linux/module.h linux-2.6.31.7/include/linux/module.h
+--- linux-2.6.31.7/include/linux/module.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/module.h 2009-12-08 17:39:44.247720843 -0500
+@@ -283,16 +283,16 @@ struct module
+ int (*init)(void);
+
+ /* If this is non-NULL, vfree after init() returns */
+- void *module_init;
++ void *module_init_rx, *module_init_rw;
+
+ /* Here is the actual code + data, vfree'd on unload. */
+- void *module_core;
++ void *module_core_rx, *module_core_rw;
+
+ /* Here are the sizes of the init and core sections */
+- unsigned int init_size, core_size;
++ unsigned int init_size_rw, core_size_rw;
+
+ /* The size of the executable code in each section. */
+- unsigned int init_text_size, core_text_size;
++ unsigned int init_size_rx, core_size_rx;
+
+ /* Arch-specific module values */
+ struct mod_arch_specific arch;
+@@ -389,16 +389,46 @@ struct module *__module_address(unsigned
+ bool is_module_address(unsigned long addr);
+ bool is_module_text_address(unsigned long addr);
+
++static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if (ktla_ktva(addr) >= (unsigned long)start &&
++ ktla_ktva(addr) < (unsigned long)start + size)
++ return 1;
++#endif
++
++ return ((void *)addr >= start && (void *)addr < start + size);
++}
++
++static inline int within_module_core_rx(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
++}
++
++static inline int within_module_core_rw(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
++}
++
++static inline int within_module_init_rx(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
++}
++
++static inline int within_module_init_rw(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
++}
++
+ static inline int within_module_core(unsigned long addr, struct module *mod)
+ {
+- return (unsigned long)mod->module_core <= addr &&
+- addr < (unsigned long)mod->module_core + mod->core_size;
++ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
+ }
+
+ static inline int within_module_init(unsigned long addr, struct module *mod)
+ {
+- return (unsigned long)mod->module_init <= addr &&
+- addr < (unsigned long)mod->module_init + mod->init_size;
++ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
+ }
+
+ /* Search for module by name: must hold module_mutex. */
+@@ -451,7 +481,11 @@ void symbol_put_addr(void *addr);
+ static inline local_t *__module_ref_addr(struct module *mod, int cpu)
+ {
+ #ifdef CONFIG_SMP
++#ifdef CONFIG_X86_32
++ return (local_t *) (mod->refptr + __per_cpu_offset[cpu]);
++#else
+ return (local_t *) (mod->refptr + per_cpu_offset(cpu));
++#endif
+ #else
+ return &mod->ref;
+ #endif
+diff -urNp linux-2.6.31.7/include/linux/moduleloader.h linux-2.6.31.7/include/linux/moduleloader.h
+--- linux-2.6.31.7/include/linux/moduleloader.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/moduleloader.h 2009-12-08 17:39:44.247720843 -0500
+@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
+ sections. Returns NULL on failure. */
+ void *module_alloc(unsigned long size);
+
++#ifdef CONFIG_PAX_KERNEXEC
++void *module_alloc_exec(unsigned long size);
++#else
++#define module_alloc_exec(x) module_alloc(x)
++#endif
++
+ /* Free memory returned from module_alloc. */
+ void module_free(struct module *mod, void *module_region);
+
++#ifdef CONFIG_PAX_KERNEXEC
++void module_free_exec(struct module *mod, void *module_region);
++#else
++#define module_free_exec(x, y) module_free(x, y)
++#endif
++
+ /* Apply the given relocation to the (simplified) ELF. Return -error
+ or 0. */
+ int apply_relocate(Elf_Shdr *sechdrs,
+diff -urNp linux-2.6.31.7/include/linux/namei.h linux-2.6.31.7/include/linux/namei.h
+--- linux-2.6.31.7/include/linux/namei.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/namei.h 2009-12-08 17:39:44.247720843 -0500
+@@ -22,7 +22,7 @@ struct nameidata {
+ unsigned int flags;
+ int last_type;
+ unsigned depth;
+- char *saved_names[MAX_NESTED_LINKS + 1];
++ const char *saved_names[MAX_NESTED_LINKS + 1];
+
+ /* Intent data */
+ union {
+@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
+ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
+ extern void unlock_rename(struct dentry *, struct dentry *);
+
+-static inline void nd_set_link(struct nameidata *nd, char *path)
++static inline void nd_set_link(struct nameidata *nd, const char *path)
+ {
+ nd->saved_names[nd->depth] = path;
+ }
+
+-static inline char *nd_get_link(struct nameidata *nd)
++static inline const char *nd_get_link(struct nameidata *nd)
+ {
+ return nd->saved_names[nd->depth];
+ }
+diff -urNp linux-2.6.31.7/include/linux/nfsd/nfsd.h linux-2.6.31.7/include/linux/nfsd/nfsd.h
+--- linux-2.6.31.7/include/linux/nfsd/nfsd.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/nfsd/nfsd.h 2009-12-08 17:39:44.247720843 -0500
+@@ -57,7 +57,7 @@ extern u32 nfsd_supported_minorversion
+ extern struct mutex nfsd_mutex;
+ extern struct svc_serv *nfsd_serv;
+
+-extern struct seq_operations nfs_exports_op;
++extern const struct seq_operations nfs_exports_op;
+
+ /*
+ * Function prototypes.
+diff -urNp linux-2.6.31.7/include/linux/nodemask.h linux-2.6.31.7/include/linux/nodemask.h
+--- linux-2.6.31.7/include/linux/nodemask.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/nodemask.h 2009-12-08 17:39:44.248812321 -0500
+@@ -464,11 +464,11 @@ static inline int num_node_state(enum no
+
+ #define any_online_node(mask) \
+ ({ \
+- int node; \
+- for_each_node_mask(node, (mask)) \
+- if (node_online(node)) \
++ int __node; \
++ for_each_node_mask(__node, (mask)) \
++ if (node_online(__node)) \
+ break; \
+- node; \
++ __node; \
+ })
+
+ #define num_online_nodes() num_node_state(N_ONLINE)
+diff -urNp linux-2.6.31.7/include/linux/oprofile.h linux-2.6.31.7/include/linux/oprofile.h
+--- linux-2.6.31.7/include/linux/oprofile.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/oprofile.h 2009-12-08 17:39:44.248812321 -0500
+@@ -128,7 +128,7 @@ int oprofilefs_create_ro_ulong(struct su
+
+ /** Create a file for read-only access to an atomic_t. */
+ int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
+- char const * name, atomic_t * val);
++ char const * name, atomic_unchecked_t * val);
+
+ /** create a directory */
+ struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
+diff -urNp linux-2.6.31.7/include/linux/pipe_fs_i.h linux-2.6.31.7/include/linux/pipe_fs_i.h
+--- linux-2.6.31.7/include/linux/pipe_fs_i.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/pipe_fs_i.h 2009-12-08 17:39:44.248812321 -0500
+@@ -46,9 +46,9 @@ struct pipe_inode_info {
+ wait_queue_head_t wait;
+ unsigned int nrbufs, curbuf;
+ struct page *tmp_page;
+- unsigned int readers;
+- unsigned int writers;
+- unsigned int waiting_writers;
++ atomic_t readers;
++ atomic_t writers;
++ atomic_t waiting_writers;
+ unsigned int r_counter;
+ unsigned int w_counter;
+ struct fasync_struct *fasync_readers;
+diff -urNp linux-2.6.31.7/include/linux/poison.h linux-2.6.31.7/include/linux/poison.h
+--- linux-2.6.31.7/include/linux/poison.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/poison.h 2009-12-08 17:39:44.249806573 -0500
+@@ -7,8 +7,8 @@
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+-#define LIST_POISON1 ((void *) 0x00100100)
+-#define LIST_POISON2 ((void *) 0x00200200)
++#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
++#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
+
+ /********** include/linux/timer.h **********/
+ /*
+diff -urNp linux-2.6.31.7/include/linux/proc_fs.h linux-2.6.31.7/include/linux/proc_fs.h
+--- linux-2.6.31.7/include/linux/proc_fs.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/proc_fs.h 2009-12-08 17:39:44.249806573 -0500
+@@ -146,6 +146,19 @@ static inline struct proc_dir_entry *pro
+ return proc_create_data(name, mode, parent, proc_fops, NULL);
+ }
+
++static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
++ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
++{
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
++#else
++ return proc_create_data(name, mode, parent, proc_fops, NULL);
++#endif
++}
++
++
+ static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
+ mode_t mode, struct proc_dir_entry *base,
+ read_proc_t *read_proc, void * data)
+diff -urNp linux-2.6.31.7/include/linux/random.h linux-2.6.31.7/include/linux/random.h
+--- linux-2.6.31.7/include/linux/random.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/random.h 2009-12-08 17:39:44.249806573 -0500
+@@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
+ u32 random32(void);
+ void srandom32(u32 seed);
+
++static inline unsigned long pax_get_random_long(void)
++{
++ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
++}
++
+ #endif /* __KERNEL___ */
+
+ #endif /* _LINUX_RANDOM_H */
+diff -urNp linux-2.6.31.7/include/linux/reiserfs_fs.h linux-2.6.31.7/include/linux/reiserfs_fs.h
+--- linux-2.6.31.7/include/linux/reiserfs_fs.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/reiserfs_fs.h 2009-12-08 17:39:44.250809200 -0500
+@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
+ #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
+
+ #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
+-#define get_generation(s) atomic_read (&fs_generation(s))
++#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
+ #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
+ #define __fs_changed(gen,s) (gen != get_generation (s))
+ #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
+@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
+ */
+
+ struct item_operations {
+- int (*bytes_number) (struct item_head * ih, int block_size);
+- void (*decrement_key) (struct cpu_key *);
+- int (*is_left_mergeable) (struct reiserfs_key * ih,
++ int (* const bytes_number) (struct item_head * ih, int block_size);
++ void (* const decrement_key) (struct cpu_key *);
++ int (* const is_left_mergeable) (struct reiserfs_key * ih,
+ unsigned long bsize);
+- void (*print_item) (struct item_head *, char *item);
+- void (*check_item) (struct item_head *, char *item);
++ void (* const print_item) (struct item_head *, char *item);
++ void (* const check_item) (struct item_head *, char *item);
+
+- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
++ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
+ int is_affected, int insert_size);
+- int (*check_left) (struct virtual_item * vi, int free,
++ int (* const check_left) (struct virtual_item * vi, int free,
+ int start_skip, int end_skip);
+- int (*check_right) (struct virtual_item * vi, int free);
+- int (*part_size) (struct virtual_item * vi, int from, int to);
+- int (*unit_num) (struct virtual_item * vi);
+- void (*print_vi) (struct virtual_item * vi);
++ int (* const check_right) (struct virtual_item * vi, int free);
++ int (* const part_size) (struct virtual_item * vi, int from, int to);
++ int (* const unit_num) (struct virtual_item * vi);
++ void (* const print_vi) (struct virtual_item * vi);
+ };
+
+-extern struct item_operations *item_ops[TYPE_ANY + 1];
++extern const struct item_operations *item_ops[TYPE_ANY + 1];
+
+ #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
+ #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
+diff -urNp linux-2.6.31.7/include/linux/reiserfs_fs_sb.h linux-2.6.31.7/include/linux/reiserfs_fs_sb.h
+--- linux-2.6.31.7/include/linux/reiserfs_fs_sb.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/reiserfs_fs_sb.h 2009-12-08 17:39:44.250809200 -0500
+@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
+ /* Comment? -Hans */
+ wait_queue_head_t s_wait;
+ /* To be obsoleted soon by per buffer seals.. -Hans */
+- atomic_t s_generation_counter; // increased by one every time the
++ atomic_unchecked_t s_generation_counter; // increased by one every time the
+ // tree gets re-balanced
+ unsigned long s_properties; /* File system properties. Currently holds
+ on-disk FS format */
+diff -urNp linux-2.6.31.7/include/linux/sched.h linux-2.6.31.7/include/linux/sched.h
+--- linux-2.6.31.7/include/linux/sched.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/sched.h 2009-12-08 17:39:44.251805632 -0500
+@@ -99,6 +99,7 @@ struct bio;
+ struct fs_struct;
+ struct bts_context;
+ struct perf_counter_context;
++struct linux_binprm;
+
+ /*
+ * List of flags we want to share for kernel threads,
+@@ -629,6 +630,15 @@ struct signal_struct {
+ unsigned audit_tty;
+ struct tty_audit_buf *tty_audit_buf;
+ #endif
++
++#ifdef CONFIG_GRKERNSEC
++ u32 curr_ip;
++ u32 gr_saddr;
++ u32 gr_daddr;
++ u16 gr_sport;
++ u16 gr_dport;
++ u8 used_accept:1;
++#endif
+ };
+
+ /* Context switch must be unlocked if interrupts are to be enabled */
+@@ -1165,7 +1175,7 @@ struct sched_rt_entity {
+
+ struct task_struct {
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+- void *stack;
++ struct thread_info *stack;
+ atomic_t usage;
+ unsigned int flags; /* per process flags, defined below */
+ unsigned int ptrace;
+@@ -1269,8 +1279,8 @@ struct task_struct {
+ struct list_head thread_group;
+
+ struct completion *vfork_done; /* for vfork() */
+- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
+- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
++ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
++ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
+
+ cputime_t utime, stime, utimescaled, stimescaled;
+ cputime_t gtime;
+@@ -1284,15 +1294,6 @@ struct task_struct {
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
+
+-/* process credentials */
+- const struct cred *real_cred; /* objective and real subjective task
+- * credentials (COW) */
+- const struct cred *cred; /* effective (overridable) subjective task
+- * credentials (COW) */
+- struct mutex cred_guard_mutex; /* guard against foreign influences on
+- * credential calculations
+- * (notably. ptrace) */
+-
+ char comm[TASK_COMM_LEN]; /* executable name excluding path
+ - access with [gs]et_task_comm (which lock
+ it with task_lock())
+@@ -1429,6 +1430,16 @@ struct task_struct {
+ struct mutex perf_counter_mutex;
+ struct list_head perf_counter_list;
+ #endif
++
++/* process credentials */
++ const struct cred *real_cred; /* objective and real subjective task
++ * credentials (COW) */
++ const struct cred *cred; /* effective (overridable) subjective task
++ * credentials (COW) */
++ struct mutex cred_guard_mutex; /* guard against foreign influences on
++ * credential calculations
++ * (notably. ptrace) */
++
+ #ifdef CONFIG_NUMA
+ struct mempolicy *mempolicy; /* Protected by alloc_lock */
+ short il_next;
+@@ -1480,8 +1491,66 @@ struct task_struct {
+ /* bitmask of trace recursion */
+ unsigned long trace_recursion;
+ #endif /* CONFIG_TRACING */
++
++#ifdef CONFIG_GRKERNSEC
++ /* grsecurity */
++ struct acl_subject_label *acl;
++ struct acl_role_label *role;
++ struct file *exec_file;
++ u16 acl_role_id;
++ u8 acl_sp_role;
++ u8 is_writable;
++ u8 brute;
++#endif
++
+ };
+
++#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
++#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
++#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
++#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
++/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
++#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
++
++#ifdef CONFIG_PAX_SOFTMODE
++extern unsigned int pax_softmode;
++#endif
++
++extern int pax_check_flags(unsigned long *);
++
++/* if tsk != current then task_lock must be held on it */
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline unsigned long pax_get_flags(struct task_struct *tsk)
++{
++ if (likely(tsk->mm))
++ return tsk->mm->pax_flags;
++ else
++ return 0UL;
++}
++
++/* if tsk != current then task_lock must be held on it */
++static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
++{
++ if (likely(tsk->mm)) {
++ tsk->mm->pax_flags = flags;
++ return 0;
++ }
++ return -EINVAL;
++}
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++extern void pax_set_initial_flags(struct linux_binprm *bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
++#endif
++
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
++void pax_report_insns(void *pc, void *sp);
++void pax_report_refcount_overflow(struct pt_regs *regs);
++void pax_report_leak_to_user(const void *ptr, unsigned long len);
++void pax_report_overflow_from_user(const void *ptr, unsigned long len);
++
+ /* Future-safe accessor for struct task_struct's cpus_allowed. */
+ #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+
+@@ -2046,7 +2115,7 @@ extern void __cleanup_sighand(struct sig
+ extern void exit_itimers(struct signal_struct *);
+ extern void flush_itimer_signals(void);
+
+-extern NORET_TYPE void do_group_exit(int);
++extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
+
+ extern void daemonize(const char *, ...);
+ extern int allow_signal(int);
+@@ -2159,8 +2228,8 @@ static inline void unlock_task_sighand(s
+
+ #ifndef __HAVE_THREAD_FUNCTIONS
+
+-#define task_thread_info(task) ((struct thread_info *)(task)->stack)
+-#define task_stack_page(task) ((task)->stack)
++#define task_thread_info(task) ((task)->stack)
++#define task_stack_page(task) ((void *)(task)->stack)
+
+ static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
+ {
+@@ -2175,7 +2244,7 @@ static inline unsigned long *end_of_stac
+
+ #endif
+
+-static inline int object_is_on_stack(void *obj)
++static inline int object_is_on_stack(const void *obj)
+ {
+ void *stack = task_stack_page(current);
+
+diff -urNp linux-2.6.31.7/include/linux/screen_info.h linux-2.6.31.7/include/linux/screen_info.h
+--- linux-2.6.31.7/include/linux/screen_info.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/screen_info.h 2009-12-08 17:39:44.251805632 -0500
+@@ -42,7 +42,8 @@ struct screen_info {
+ __u16 pages; /* 0x32 */
+ __u16 vesa_attributes; /* 0x34 */
+ __u32 capabilities; /* 0x36 */
+- __u8 _reserved[6]; /* 0x3a */
++ __u16 vesapm_size; /* 0x3a */
++ __u8 _reserved[4]; /* 0x3c */
+ } __attribute__((packed));
+
+ #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
+diff -urNp linux-2.6.31.7/include/linux/security.h linux-2.6.31.7/include/linux/security.h
+--- linux-2.6.31.7/include/linux/security.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/security.h 2009-12-08 17:39:44.252810633 -0500
+@@ -34,6 +34,7 @@
+ #include <linux/key.h>
+ #include <linux/xfrm.h>
+ #include <linux/gfp.h>
++#include <linux/grsecurity.h>
+ #include <net/flow.h>
+
+ /* Maximum number of letters for an LSM name string */
+diff -urNp linux-2.6.31.7/include/linux/shm.h linux-2.6.31.7/include/linux/shm.h
+--- linux-2.6.31.7/include/linux/shm.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/shm.h 2009-12-08 17:39:44.252810633 -0500
+@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
+ pid_t shm_cprid;
+ pid_t shm_lprid;
+ struct user_struct *mlock_user;
++#ifdef CONFIG_GRKERNSEC
++ time_t shm_createtime;
++ pid_t shm_lapid;
++#endif
+ };
+
+ /* shm_mode upper byte flags */
+diff -urNp linux-2.6.31.7/include/linux/slab.h linux-2.6.31.7/include/linux/slab.h
+--- linux-2.6.31.7/include/linux/slab.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/slab.h 2009-12-08 17:39:44.253805703 -0500
+@@ -11,6 +11,7 @@
+
+ #include <linux/gfp.h>
+ #include <linux/types.h>
++#include <linux/err.h>
+
+ /*
+ * Flags to pass to kmem_cache_create().
+@@ -82,10 +83,13 @@
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+-#define ZERO_SIZE_PTR ((void *)16)
++#define ZERO_SIZE_PTR \
++({ \
++ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
++ (void *)(-MAX_ERRNO-1L); \
++})
+
+-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
+- (unsigned long)ZERO_SIZE_PTR)
++#define ZERO_OR_NULL_PTR(x) (!(x) || (x) == ZERO_SIZE_PTR)
+
+ /*
+ * struct kmem_cache related prototypes
+@@ -138,6 +142,7 @@ void * __must_check krealloc(const void
+ void kfree(const void *);
+ void kzfree(const void *);
+ size_t ksize(const void *);
++void check_object_size(const void *ptr, unsigned long n, bool to);
+
+ /*
+ * Allocator specific definitions. These are mainly used to establish optimized
+@@ -328,4 +333,37 @@ static inline void *kzalloc_node(size_t
+
+ void __init kmem_cache_init_late(void);
+
++#define kmalloc(x, y) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = kmalloc((size_t)___x, (y)); \
++ ___retval; \
++})
++
++#define kmalloc_node(x, y, z) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = kmalloc_node((size_t)___x, (y), (z));\
++ ___retval; \
++})
++
++#define kzalloc(x, y) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = kzalloc((size_t)___x, (y)); \
++ ___retval; \
++})
++
+ #endif /* _LINUX_SLAB_H */
+diff -urNp linux-2.6.31.7/include/linux/slub_def.h linux-2.6.31.7/include/linux/slub_def.h
+--- linux-2.6.31.7/include/linux/slub_def.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/slub_def.h 2009-12-08 17:39:44.253805703 -0500
+@@ -86,7 +86,7 @@ struct kmem_cache {
+ struct kmem_cache_order_objects max;
+ struct kmem_cache_order_objects min;
+ gfp_t allocflags; /* gfp flags to use on each alloc */
+- int refcount; /* Refcount for slab cache destroy */
++ atomic_t refcount; /* Refcount for slab cache destroy */
+ void (*ctor)(void *);
+ int inuse; /* Offset to metadata */
+ int align; /* Alignment */
+diff -urNp linux-2.6.31.7/include/linux/sonet.h linux-2.6.31.7/include/linux/sonet.h
+--- linux-2.6.31.7/include/linux/sonet.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/sonet.h 2009-12-08 17:39:44.253805703 -0500
+@@ -61,7 +61,7 @@ struct sonet_stats {
+ #include <asm/atomic.h>
+
+ struct k_sonet_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+ __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ };
+diff -urNp linux-2.6.31.7/include/linux/suspend.h linux-2.6.31.7/include/linux/suspend.h
+--- linux-2.6.31.7/include/linux/suspend.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/suspend.h 2009-12-08 17:39:44.253805703 -0500
+@@ -120,7 +120,7 @@ struct platform_suspend_ops {
+ * suspend_set_ops - set platform dependent suspend operations
+ * @ops: The new suspend operations to set.
+ */
+-extern void suspend_set_ops(struct platform_suspend_ops *ops);
++extern void suspend_set_ops(const struct platform_suspend_ops *ops);
+ extern int suspend_valid_only_mem(suspend_state_t state);
+
+ /**
+@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
+ extern void swsusp_unset_page_free(struct page *);
+ extern unsigned long get_safe_page(gfp_t gfp_mask);
+
+-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
++extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
+ extern int hibernate(void);
+ extern bool system_entering_hibernation(void);
+ #else /* CONFIG_HIBERNATION */
+@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
+ static inline void swsusp_set_page_free(struct page *p) {}
+ static inline void swsusp_unset_page_free(struct page *p) {}
+
+-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
++static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
+ static inline int hibernate(void) { return -ENOSYS; }
+ static inline bool system_entering_hibernation(void) { return false; }
+ #endif /* CONFIG_HIBERNATION */
+diff -urNp linux-2.6.31.7/include/linux/sysctl.h linux-2.6.31.7/include/linux/sysctl.h
+--- linux-2.6.31.7/include/linux/sysctl.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/sysctl.h 2009-12-08 17:39:44.254748505 -0500
+@@ -165,7 +165,11 @@ enum
+ KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
+ };
+
+-
++#ifdef CONFIG_PAX_SOFTMODE
++enum {
++ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
++};
++#endif
+
+ /* CTL_VM names: */
+ enum
+diff -urNp linux-2.6.31.7/include/linux/sysfs.h linux-2.6.31.7/include/linux/sysfs.h
+--- linux-2.6.31.7/include/linux/sysfs.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/sysfs.h 2009-12-08 17:39:44.254748505 -0500
+@@ -75,8 +75,8 @@ struct bin_attribute {
+ };
+
+ struct sysfs_ops {
+- ssize_t (*show)(struct kobject *, struct attribute *,char *);
+- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
++ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
++ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
+ };
+
+ struct sysfs_dirent;
+diff -urNp linux-2.6.31.7/include/linux/thread_info.h linux-2.6.31.7/include/linux/thread_info.h
+--- linux-2.6.31.7/include/linux/thread_info.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/thread_info.h 2009-12-08 17:39:44.254748505 -0500
+@@ -23,7 +23,7 @@ struct restart_block {
+ };
+ /* For futex_wait and futex_wait_requeue_pi */
+ struct {
+- u32 *uaddr;
++ u32 __user *uaddr;
+ u32 val;
+ u32 flags;
+ u32 bitset;
+diff -urNp linux-2.6.31.7/include/linux/tty_ldisc.h linux-2.6.31.7/include/linux/tty_ldisc.h
+--- linux-2.6.31.7/include/linux/tty_ldisc.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/tty_ldisc.h 2009-12-08 17:39:44.254748505 -0500
+@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
+
+ struct module *owner;
+
+- int refcount;
++ atomic_t refcount;
+ };
+
+ struct tty_ldisc {
+diff -urNp linux-2.6.31.7/include/linux/types.h linux-2.6.31.7/include/linux/types.h
+--- linux-2.6.31.7/include/linux/types.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/types.h 2009-12-08 17:39:44.255805741 -0500
+@@ -191,10 +191,26 @@ typedef struct {
+ volatile int counter;
+ } atomic_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ volatile int counter;
++} atomic_unchecked_t;
++#else
++typedef atomic_t atomic_unchecked_t;
++#endif
++
+ #ifdef CONFIG_64BIT
+ typedef struct {
+ volatile long counter;
+ } atomic64_t;
++
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ volatile long counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
+ #endif
+
+ struct ustat {
+diff -urNp linux-2.6.31.7/include/linux/uaccess.h linux-2.6.31.7/include/linux/uaccess.h
+--- linux-2.6.31.7/include/linux/uaccess.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/uaccess.h 2009-12-08 17:39:44.255805741 -0500
+@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
+ long ret; \
+ mm_segment_t old_fs = get_fs(); \
+ \
+- set_fs(KERNEL_DS); \
+ pagefault_disable(); \
++ set_fs(KERNEL_DS); \
+ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
+- pagefault_enable(); \
+ set_fs(old_fs); \
++ pagefault_enable(); \
+ ret; \
+ })
+
+@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
+ * Safely read from address @src to the buffer at @dst. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+-extern long probe_kernel_read(void *dst, void *src, size_t size);
++extern long probe_kernel_read(void *dst, const void *src, size_t size);
+
+ /*
+ * probe_kernel_write(): safely attempt to write to a location
+@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+-extern long probe_kernel_write(void *dst, void *src, size_t size);
++extern long probe_kernel_write(void *dst, const void *src, size_t size);
+
+ #endif /* __LINUX_UACCESS_H__ */
+diff -urNp linux-2.6.31.7/include/linux/vmalloc.h linux-2.6.31.7/include/linux/vmalloc.h
+--- linux-2.6.31.7/include/linux/vmalloc.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/linux/vmalloc.h 2009-12-08 17:39:44.255805741 -0500
+@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
+ #define VM_MAP 0x00000004 /* vmap()ed pages */
+ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
+ #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
++#endif
++
+ /* bits [20..32] reserved for arch specific ioremap internals */
+
+ /*
+@@ -115,4 +120,81 @@ extern rwlock_t vmlist_lock;
+ extern struct vm_struct *vmlist;
+ extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+
++#define vmalloc(x) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
++ ___retval = NULL; \
++ else \
++ ___retval = vmalloc((unsigned long)___x); \
++ ___retval; \
++})
++
++#define __vmalloc(x, y, z) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
++ ___retval; \
++})
++
++#define vmalloc_user(x) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = vmalloc_user((unsigned long)___x); \
++ ___retval; \
++})
++
++#define vmalloc_exec(x) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = vmalloc_exec((unsigned long)___x); \
++ ___retval; \
++})
++
++#define vmalloc_node(x, y) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = vmalloc_node((unsigned long)___x, (y));\
++ ___retval; \
++})
++
++#define vmalloc_32(x) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = vmalloc_32((unsigned long)___x); \
++ ___retval; \
++})
++
++#define vmalloc_32_user(x) \
++({ \
++ void *___retval; \
++ intoverflow_t ___x = (intoverflow_t)x; \
++ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
++ ___retval = NULL; \
++ else \
++ ___retval = vmalloc_32_user((unsigned long)___x);\
++ ___retval; \
++})
++
+ #endif /* _LINUX_VMALLOC_H */
+diff -urNp linux-2.6.31.7/include/net/irda/ircomm_tty.h linux-2.6.31.7/include/net/irda/ircomm_tty.h
+--- linux-2.6.31.7/include/net/irda/ircomm_tty.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/net/irda/ircomm_tty.h 2009-12-08 17:39:44.256806986 -0500
+@@ -105,8 +105,8 @@ struct ircomm_tty_cb {
+ unsigned short close_delay;
+ unsigned short closing_wait; /* time to wait before closing */
+
+- int open_count;
+- int blocked_open; /* # of blocked opens */
++ atomic_t open_count;
++ atomic_t blocked_open; /* # of blocked opens */
+
+ /* Protect concurent access to :
+ * o self->open_count
+diff -urNp linux-2.6.31.7/include/net/sctp/sctp.h linux-2.6.31.7/include/net/sctp/sctp.h
+--- linux-2.6.31.7/include/net/sctp/sctp.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/net/sctp/sctp.h 2009-12-08 17:39:44.256806986 -0500
+@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
+
+ #else /* SCTP_DEBUG */
+
+-#define SCTP_DEBUG_PRINTK(whatever...)
+-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
++#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
++#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
+ #define SCTP_ENABLE_DEBUG
+ #define SCTP_DISABLE_DEBUG
+ #define SCTP_ASSERT(expr, str, func)
+diff -urNp linux-2.6.31.7/include/net/tcp.h linux-2.6.31.7/include/net/tcp.h
+--- linux-2.6.31.7/include/net/tcp.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/net/tcp.h 2009-12-08 17:39:44.256806986 -0500
+@@ -667,26 +667,26 @@ struct tcp_congestion_ops {
+ unsigned long flags;
+
+ /* initialize private data (optional) */
+- void (*init)(struct sock *sk);
++ void (* const init)(struct sock *sk);
+ /* cleanup private data (optional) */
+- void (*release)(struct sock *sk);
++ void (* const release)(struct sock *sk);
+
+ /* return slow start threshold (required) */
+- u32 (*ssthresh)(struct sock *sk);
++ u32 (* const ssthresh)(struct sock *sk);
+ /* lower bound for congestion window (optional) */
+- u32 (*min_cwnd)(const struct sock *sk);
++ u32 (* const min_cwnd)(const struct sock *sk);
+ /* do new cwnd calculation (required) */
+- void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
++ void (* const cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
+ /* call before changing ca_state (optional) */
+- void (*set_state)(struct sock *sk, u8 new_state);
++ void (* const set_state)(struct sock *sk, u8 new_state);
+ /* call when cwnd event occurs (optional) */
+- void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
++ void (* const cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
+ /* new value of cwnd after loss (optional) */
+- u32 (*undo_cwnd)(struct sock *sk);
++ u32 (* const undo_cwnd)(struct sock *sk);
+ /* hook for packet ack accounting (optional) */
+- void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
++ void (* const pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
+ /* get info for inet_diag (optional) */
+- void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
++ void (* const get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
+
+ char name[TCP_CA_NAME_MAX];
+ struct module *owner;
+diff -urNp linux-2.6.31.7/include/sound/ac97_codec.h linux-2.6.31.7/include/sound/ac97_codec.h
+--- linux-2.6.31.7/include/sound/ac97_codec.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/sound/ac97_codec.h 2009-12-08 17:39:44.257806274 -0500
+@@ -474,7 +474,7 @@ struct snd_ac97_template {
+
+ struct snd_ac97 {
+ /* -- lowlevel (hardware) driver specific -- */
+- struct snd_ac97_build_ops * build_ops;
++ const struct snd_ac97_build_ops * build_ops;
+ void *private_data;
+ void (*private_free) (struct snd_ac97 *ac97);
+ /* --- */
+diff -urNp linux-2.6.31.7/include/sound/core.h linux-2.6.31.7/include/sound/core.h
+--- linux-2.6.31.7/include/sound/core.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/sound/core.h 2009-12-08 17:39:44.257806274 -0500
+@@ -430,7 +430,7 @@ static inline int __snd_bug_on(int cond)
+ */
+ #define snd_printdd(format, args...) snd_printk(format, ##args)
+ #else
+-#define snd_printdd(format, args...) /* nothing */
++#define snd_printdd(format, args...) do {} while (0)
+ #endif
+
+
+diff -urNp linux-2.6.31.7/include/video/uvesafb.h linux-2.6.31.7/include/video/uvesafb.h
+--- linux-2.6.31.7/include/video/uvesafb.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/include/video/uvesafb.h 2009-12-08 17:39:44.257806274 -0500
+@@ -177,6 +177,7 @@ struct uvesafb_par {
+ u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
+ u8 pmi_setpal; /* PMI for palette changes */
+ u16 *pmi_base; /* protected mode interface location */
++ u8 *pmi_code; /* protected mode code location */
+ void *pmi_start;
+ void *pmi_pal;
+ u8 *vbe_state_orig; /*
+diff -urNp linux-2.6.31.7/init/do_mounts.c linux-2.6.31.7/init/do_mounts.c
+--- linux-2.6.31.7/init/do_mounts.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/init/do_mounts.c 2009-12-08 17:39:44.258741124 -0500
+@@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
+
+ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
+ {
+- int err = sys_mount(name, "/root", fs, flags, data);
++ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
+ if (err)
+ return err;
+
+- sys_chdir("/root");
++ sys_chdir((__force char __user *)"/root");
+ ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
+ printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
+ current->fs->pwd.mnt->mnt_sb->s_type->name,
+@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
+ va_start(args, fmt);
+ vsprintf(buf, fmt, args);
+ va_end(args);
+- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, FDEJECT, 0);
+ sys_close(fd);
+ }
+ printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
+- fd = sys_open("/dev/console", O_RDWR, 0);
++ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, TCGETS, (long)&termios);
+ termios.c_lflag &= ~ICANON;
+ sys_ioctl(fd, TCSETSF, (long)&termios);
+- sys_read(fd, &c, 1);
++ sys_read(fd, (char __user *)&c, 1);
+ termios.c_lflag |= ICANON;
+ sys_ioctl(fd, TCSETSF, (long)&termios);
+ sys_close(fd);
+@@ -415,7 +415,7 @@ void __init prepare_namespace(void)
+
+ mount_root();
+ out:
+- sys_mount(".", "/", NULL, MS_MOVE, NULL);
+- sys_chroot(".");
++ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
++ sys_chroot((__force char __user *)".");
+ }
+
+diff -urNp linux-2.6.31.7/init/do_mounts.h linux-2.6.31.7/init/do_mounts.h
+--- linux-2.6.31.7/init/do_mounts.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/init/do_mounts.h 2009-12-08 17:39:44.258741124 -0500
+@@ -15,15 +15,15 @@ extern int root_mountflags;
+
+ static inline int create_dev(char *name, dev_t dev)
+ {
+- sys_unlink(name);
+- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
++ sys_unlink((__force char __user *)name);
++ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
+ }
+
+ #if BITS_PER_LONG == 32
+ static inline u32 bstat(char *name)
+ {
+ struct stat64 stat;
+- if (sys_stat64(name, &stat) != 0)
++ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
+ return 0;
+ if (!S_ISBLK(stat.st_mode))
+ return 0;
+diff -urNp linux-2.6.31.7/init/do_mounts_initrd.c linux-2.6.31.7/init/do_mounts_initrd.c
+--- linux-2.6.31.7/init/do_mounts_initrd.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/init/do_mounts_initrd.c 2009-12-08 17:39:44.259699939 -0500
+@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
+ sys_close(old_fd);sys_close(root_fd);
+ sys_close(0);sys_close(1);sys_close(2);
+ sys_setsid();
+- (void) sys_open("/dev/console",O_RDWR,0);
++ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
+ (void) sys_dup(0);
+ (void) sys_dup(0);
+ return kernel_execve(shell, argv, envp_init);
+@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
+ create_dev("/dev/root.old", Root_RAM0);
+ /* mount initrd on rootfs' /root */
+ mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
+- sys_mkdir("/old", 0700);
+- root_fd = sys_open("/", 0, 0);
+- old_fd = sys_open("/old", 0, 0);
++ sys_mkdir((__force const char __user *)"/old", 0700);
++ root_fd = sys_open((__force const char __user *)"/", 0, 0);
++ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
+ /* move initrd over / and chdir/chroot in initrd root */
+- sys_chdir("/root");
+- sys_mount(".", "/", NULL, MS_MOVE, NULL);
+- sys_chroot(".");
++ sys_chdir((__force const char __user *)"/root");
++ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
++ sys_chroot((__force const char __user *)".");
+
+ /*
+ * In case that a resume from disk is carried out by linuxrc or one of
+@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
+
+ /* move initrd to rootfs' /old */
+ sys_fchdir(old_fd);
+- sys_mount("/", ".", NULL, MS_MOVE, NULL);
++ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
+ /* switch root and cwd back to / of rootfs */
+ sys_fchdir(root_fd);
+- sys_chroot(".");
++ sys_chroot((__force const char __user *)".");
+ sys_close(old_fd);
+ sys_close(root_fd);
+
+ if (new_decode_dev(real_root_dev) == Root_RAM0) {
+- sys_chdir("/old");
++ sys_chdir((__force const char __user *)"/old");
+ return;
+ }
+
+@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
+ mount_root();
+
+ printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
+- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
++ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
+ if (!error)
+ printk("okay\n");
+ else {
+- int fd = sys_open("/dev/root.old", O_RDWR, 0);
++ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
+ if (error == -ENOENT)
+ printk("/initrd does not exist. Ignored.\n");
+ else
+ printk("failed\n");
+ printk(KERN_NOTICE "Unmounting old root\n");
+- sys_umount("/old", MNT_DETACH);
++ sys_umount((__force char __user *)"/old", MNT_DETACH);
+ printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
+ if (fd < 0) {
+ error = fd;
+@@ -119,11 +119,11 @@ int __init initrd_load(void)
+ * mounted in the normal path.
+ */
+ if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
+- sys_unlink("/initrd.image");
++ sys_unlink((__force const char __user *)"/initrd.image");
+ handle_initrd();
+ return 1;
+ }
+ }
+- sys_unlink("/initrd.image");
++ sys_unlink((__force const char __user *)"/initrd.image");
+ return 0;
+ }
+diff -urNp linux-2.6.31.7/init/do_mounts_md.c linux-2.6.31.7/init/do_mounts_md.c
+--- linux-2.6.31.7/init/do_mounts_md.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/init/do_mounts_md.c 2009-12-08 17:39:44.259699939 -0500
+@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
+ partitioned ? "_d" : "", minor,
+ md_setup_args[ent].device_names);
+
+- fd = sys_open(name, 0, 0);
++ fd = sys_open((__force char __user *)name, 0, 0);
+ if (fd < 0) {
+ printk(KERN_ERR "md: open failed - cannot start "
+ "array %s\n", name);
+@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
+ * array without it
+ */
+ sys_close(fd);
+- fd = sys_open(name, 0, 0);
++ fd = sys_open((__force char __user *)name, 0, 0);
+ sys_ioctl(fd, BLKRRPART, 0);
+ }
+ sys_close(fd);
+@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
+
+ wait_for_device_probe();
+
+- fd = sys_open("/dev/md0", 0, 0);
++ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
+ sys_close(fd);
+diff -urNp linux-2.6.31.7/init/initramfs.c linux-2.6.31.7/init/initramfs.c
+--- linux-2.6.31.7/init/initramfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/init/initramfs.c 2009-12-08 17:39:44.260828709 -0500
+@@ -74,7 +74,7 @@ static void __init free_hash(void)
+ }
+ }
+
+-static long __init do_utime(char __user *filename, time_t mtime)
++static long __init do_utime(__force char __user *filename, time_t mtime)
+ {
+ struct timespec t[2];
+
+@@ -109,7 +109,7 @@ static void __init dir_utime(void)
+ struct dir_entry *de, *tmp;
+ list_for_each_entry_safe(de, tmp, &dir_list, list) {
+ list_del(&de->list);
+- do_utime(de->name, de->mtime);
++ do_utime((__force char __user *)de->name, de->mtime);
+ kfree(de->name);
+ kfree(de);
+ }
+@@ -271,7 +271,7 @@ static int __init maybe_link(void)
+ if (nlink >= 2) {
+ char *old = find_link(major, minor, ino, mode, collected);
+ if (old)
+- return (sys_link(old, collected) < 0) ? -1 : 1;
++ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
+ }
+ return 0;
+ }
+@@ -280,11 +280,11 @@ static void __init clean_path(char *path
+ {
+ struct stat st;
+
+- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
++ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
+ if (S_ISDIR(st.st_mode))
+- sys_rmdir(path);
++ sys_rmdir((__force char __user *)path);
+ else
+- sys_unlink(path);
++ sys_unlink((__force char __user *)path);
+ }
+ }
+
+@@ -305,7 +305,7 @@ static int __init do_name(void)
+ int openflags = O_WRONLY|O_CREAT;
+ if (ml != 1)
+ openflags |= O_TRUNC;
+- wfd = sys_open(collected, openflags, mode);
++ wfd = sys_open((__force char __user *)collected, openflags, mode);
+
+ if (wfd >= 0) {
+ sys_fchown(wfd, uid, gid);
+@@ -317,17 +317,17 @@ static int __init do_name(void)
+ }
+ }
+ } else if (S_ISDIR(mode)) {
+- sys_mkdir(collected, mode);
+- sys_chown(collected, uid, gid);
+- sys_chmod(collected, mode);
++ sys_mkdir((__force char __user *)collected, mode);
++ sys_chown((__force char __user *)collected, uid, gid);
++ sys_chmod((__force char __user *)collected, mode);
+ dir_add(collected, mtime);
+ } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
+ S_ISFIFO(mode) || S_ISSOCK(mode)) {
+ if (maybe_link() == 0) {
+- sys_mknod(collected, mode, rdev);
+- sys_chown(collected, uid, gid);
+- sys_chmod(collected, mode);
+- do_utime(collected, mtime);
++ sys_mknod((__force char __user *)collected, mode, rdev);
++ sys_chown((__force char __user *)collected, uid, gid);
++ sys_chmod((__force char __user *)collected, mode);
++ do_utime((__force char __user *)collected, mtime);
+ }
+ }
+ return 0;
+@@ -336,15 +336,15 @@ static int __init do_name(void)
+ static int __init do_copy(void)
+ {
+ if (count >= body_len) {
+- sys_write(wfd, victim, body_len);
++ sys_write(wfd, (__force char __user *)victim, body_len);
+ sys_close(wfd);
+- do_utime(vcollected, mtime);
++ do_utime((__force char __user *)vcollected, mtime);
+ kfree(vcollected);
+ eat(body_len);
+ state = SkipIt;
+ return 0;
+ } else {
+- sys_write(wfd, victim, count);
++ sys_write(wfd, (__force char __user *)victim, count);
+ body_len -= count;
+ eat(count);
+ return 1;
+@@ -355,9 +355,9 @@ static int __init do_symlink(void)
+ {
+ collected[N_ALIGN(name_len) + body_len] = '\0';
+ clean_path(collected, 0);
+- sys_symlink(collected + N_ALIGN(name_len), collected);
+- sys_lchown(collected, uid, gid);
+- do_utime(collected, mtime);
++ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
++ sys_lchown((__force char __user *)collected, uid, gid);
++ do_utime((__force char __user *)collected, mtime);
+ state = SkipIt;
+ next_state = Reset;
+ return 0;
+diff -urNp linux-2.6.31.7/init/Kconfig linux-2.6.31.7/init/Kconfig
+--- linux-2.6.31.7/init/Kconfig 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/init/Kconfig 2009-12-08 17:39:44.258741124 -0500
+@@ -1014,7 +1014,7 @@ config STRIP_ASM_SYMS
+
+ config COMPAT_BRK
+ bool "Disable heap randomization"
+- default y
++ default n
+ help
+ Randomizing heap placement makes heap exploits harder, but it
+ also breaks ancient binaries (including anything libc5 based).
+@@ -1101,9 +1101,9 @@ config HAVE_GENERIC_DMA_COHERENT
+
+ config SLABINFO
+ bool
+- depends on PROC_FS
++ depends on PROC_FS && !GRKERNSEC_PROC_ADD
+ depends on SLAB || SLUB_DEBUG
+- default y
++ default n
+
+ config RT_MUTEXES
+ boolean
+diff -urNp linux-2.6.31.7/init/main.c linux-2.6.31.7/init/main.c
+--- linux-2.6.31.7/init/main.c 2009-12-08 17:29:51.636696310 -0500
++++ linux-2.6.31.7/init/main.c 2009-12-08 17:39:44.260828709 -0500
+@@ -96,6 +96,7 @@ static inline void mark_rodata_ro(void)
+ #ifdef CONFIG_TC
+ extern void tc_init(void);
+ #endif
++extern void grsecurity_init(void);
+
+ enum system_states system_state __read_mostly;
+ EXPORT_SYMBOL(system_state);
+@@ -182,6 +183,35 @@ static int __init set_reset_devices(char
+
+ __setup("reset_devices", set_reset_devices);
+
++#if defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32)
++static int __init setup_pax_nouderef(char *str)
++{
++ unsigned int cpu;
++
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_DS].type = 3;
++ get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++ }
++ asm("mov %0, %%ds" : : "r" (__KERNEL_DS) : "memory");
++ asm("mov %0, %%es" : : "r" (__KERNEL_DS) : "memory");
++ asm("mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
++
++ return 0;
++}
++early_param("pax_nouderef", setup_pax_nouderef);
++#endif
++
++#ifdef CONFIG_PAX_SOFTMODE
++unsigned int pax_softmode;
++
++static int __init setup_pax_softmode(char *str)
++{
++ get_option(&str, &pax_softmode);
++ return 1;
++}
++__setup("pax_softmode=", setup_pax_softmode);
++#endif
++
+ static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
+ char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
+ static const char *panic_later, *panic_param;
+@@ -370,7 +400,7 @@ static void __init setup_nr_cpu_ids(void
+ }
+
+ #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
++unsigned long __per_cpu_offset[NR_CPUS] __read_only;
+
+ EXPORT_SYMBOL(__per_cpu_offset);
+
+@@ -729,52 +759,53 @@ int initcall_debug;
+ core_param(initcall_debug, initcall_debug, bool, 0644);
+
+ static char msgbuf[64];
+-static struct boot_trace_call call;
+-static struct boot_trace_ret ret;
++static struct boot_trace_call trace_call;
++static struct boot_trace_ret trace_ret;
+
+ int do_one_initcall(initcall_t fn)
+ {
+ int count = preempt_count();
+ ktime_t calltime, delta, rettime;
++ const char *msg1 = "", *msg2 = "";
+
+ if (initcall_debug) {
+- call.caller = task_pid_nr(current);
+- printk("calling %pF @ %i\n", fn, call.caller);
++ trace_call.caller = task_pid_nr(current);
++ printk("calling %pF @ %i\n", fn, trace_call.caller);
+ calltime = ktime_get();
+- trace_boot_call(&call, fn);
++ trace_boot_call(&trace_call, fn);
+ enable_boot_trace();
+ }
+
+- ret.result = fn();
++ trace_ret.result = fn();
+
+ if (initcall_debug) {
+ disable_boot_trace();
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+- trace_boot_ret(&ret, fn);
++ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
++ trace_boot_ret(&trace_ret, fn);
+ printk("initcall %pF returned %d after %Ld usecs\n", fn,
+- ret.result, ret.duration);
++ trace_ret.result, trace_ret.duration);
+ }
+
+ msgbuf[0] = 0;
+
+- if (ret.result && ret.result != -ENODEV && initcall_debug)
+- sprintf(msgbuf, "error code %d ", ret.result);
++ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
++ sprintf(msgbuf, "error code %d ", trace_ret.result);
+
+ if (preempt_count() != count) {
+- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
++ msg1 = " preemption imbalance";
+ preempt_count() = count;
+ }
+ if (irqs_disabled()) {
+- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
++ msg2 = " disabled interrupts";
+ local_irq_enable();
+ }
+- if (msgbuf[0]) {
+- printk("initcall %pF returned with %s\n", fn, msgbuf);
++ if (msgbuf[0] || *msg1 || *msg2) {
++ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
+ }
+
+- return ret.result;
++ return trace_ret.result;
+ }
+
+
+@@ -913,11 +944,13 @@ static int __init kernel_init(void * unu
+ if (!ramdisk_execute_command)
+ ramdisk_execute_command = "/init";
+
+- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
++ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
+ ramdisk_execute_command = NULL;
+ prepare_namespace();
+ }
+
++ grsecurity_init();
++
+ /*
+ * Ok, we have completed the initial bootup, and
+ * we're essentially up and running. Get rid of the
+diff -urNp linux-2.6.31.7/init/noinitramfs.c linux-2.6.31.7/init/noinitramfs.c
+--- linux-2.6.31.7/init/noinitramfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/init/noinitramfs.c 2009-12-08 17:39:44.260828709 -0500
+@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
+ {
+ int err;
+
+- err = sys_mkdir("/dev", 0755);
++ err = sys_mkdir((const char __user *)"/dev", 0755);
+ if (err < 0)
+ goto out;
+
+@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
+ if (err < 0)
+ goto out;
+
+- err = sys_mkdir("/root", 0700);
++ err = sys_mkdir((const char __user *)"/root", 0700);
+ if (err < 0)
+ goto out;
+
+diff -urNp linux-2.6.31.7/ipc/ipc_sysctl.c linux-2.6.31.7/ipc/ipc_sysctl.c
+--- linux-2.6.31.7/ipc/ipc_sysctl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/ipc/ipc_sysctl.c 2009-12-08 17:39:44.261779891 -0500
+@@ -267,7 +267,7 @@ static struct ctl_table ipc_kern_table[]
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+- {}
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+
+ static struct ctl_table ipc_root_table[] = {
+@@ -277,7 +277,7 @@ static struct ctl_table ipc_root_table[]
+ .mode = 0555,
+ .child = ipc_kern_table,
+ },
+- {}
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+
+ static int __init ipc_sysctl_init(void)
+diff -urNp linux-2.6.31.7/ipc/mqueue.c linux-2.6.31.7/ipc/mqueue.c
+--- linux-2.6.31.7/ipc/mqueue.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/ipc/mqueue.c 2009-12-08 17:39:44.261779891 -0500
+@@ -77,7 +77,7 @@ struct mqueue_inode_info {
+
+ static const struct inode_operations mqueue_dir_inode_operations;
+ static const struct file_operations mqueue_file_operations;
+-static struct super_operations mqueue_super_ops;
++static const struct super_operations mqueue_super_ops;
+ static void remove_notification(struct mqueue_inode_info *info);
+
+ static struct kmem_cache *mqueue_inode_cachep;
+@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
+ mq_bytes = (mq_msg_tblsz +
+ (info->attr.mq_maxmsg * info->attr.mq_msgsize));
+
++ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
+ spin_lock(&mq_lock);
+ if (u->mq_bytes + mq_bytes < u->mq_bytes ||
+ u->mq_bytes + mq_bytes >
+@@ -1224,7 +1225,7 @@ static const struct file_operations mque
+ .read = mqueue_read_file,
+ };
+
+-static struct super_operations mqueue_super_ops = {
++static const struct super_operations mqueue_super_ops = {
+ .alloc_inode = mqueue_alloc_inode,
+ .destroy_inode = mqueue_destroy_inode,
+ .statfs = simple_statfs,
+diff -urNp linux-2.6.31.7/ipc/shm.c linux-2.6.31.7/ipc/shm.c
+--- linux-2.6.31.7/ipc/shm.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/ipc/shm.c 2009-12-08 17:39:44.262700313 -0500
+@@ -55,7 +55,7 @@ struct shm_file_data {
+ #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
+
+ static const struct file_operations shm_file_operations;
+-static struct vm_operations_struct shm_vm_ops;
++static const struct vm_operations_struct shm_vm_ops;
+
+ #define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
+
+@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
+ static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
+ #endif
+
++#ifdef CONFIG_GRKERNSEC
++extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid,
++ const int shmid);
++extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime);
++#endif
++
+ void shm_init_ns(struct ipc_namespace *ns)
+ {
+ ns->shm_ctlmax = SHMMAX;
+@@ -312,7 +320,7 @@ static const struct file_operations shm_
+ .get_unmapped_area = shm_get_unmapped_area,
+ };
+
+-static struct vm_operations_struct shm_vm_ops = {
++static const struct vm_operations_struct shm_vm_ops = {
+ .open = shm_open, /* callback for a new vm-area open */
+ .close = shm_close, /* callback for when the vm-area is released */
+ .fault = shm_fault,
+@@ -395,6 +403,14 @@ static int newseg(struct ipc_namespace *
+ shp->shm_lprid = 0;
+ shp->shm_atim = shp->shm_dtim = 0;
+ shp->shm_ctim = get_seconds();
++#ifdef CONFIG_GRKERNSEC
++ {
++ struct timespec timeval;
++ do_posix_clock_monotonic_gettime(&timeval);
++
++ shp->shm_createtime = timeval.tv_sec;
++ }
++#endif
+ shp->shm_segsz = size;
+ shp->shm_nattch = 0;
+ shp->shm_file = file;
+@@ -878,9 +894,21 @@ long do_shmat(int shmid, char __user *sh
+ if (err)
+ goto out_unlock;
+
++#ifdef CONFIG_GRKERNSEC
++ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
++ shp->shm_perm.cuid, shmid) ||
++ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
++ err = -EACCES;
++ goto out_unlock;
++ }
++#endif
++
+ path.dentry = dget(shp->shm_file->f_path.dentry);
+ path.mnt = shp->shm_file->f_path.mnt;
+ shp->shm_nattch++;
++#ifdef CONFIG_GRKERNSEC
++ shp->shm_lapid = current->pid;
++#endif
+ size = i_size_read(path.dentry->d_inode);
+ shm_unlock(shp);
+
+diff -urNp linux-2.6.31.7/ipc/util.c linux-2.6.31.7/ipc/util.c
+--- linux-2.6.31.7/ipc/util.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/ipc/util.c 2009-12-08 17:39:44.262700313 -0500
+@@ -942,7 +942,7 @@ static int sysvipc_proc_show(struct seq_
+ return iface->show(s, it);
+ }
+
+-static struct seq_operations sysvipc_proc_seqops = {
++static const struct seq_operations sysvipc_proc_seqops = {
+ .start = sysvipc_proc_start,
+ .stop = sysvipc_proc_stop,
+ .next = sysvipc_proc_next,
+diff -urNp linux-2.6.31.7/kernel/acct.c linux-2.6.31.7/kernel/acct.c
+--- linux-2.6.31.7/kernel/acct.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/acct.c 2009-12-08 17:39:44.262700313 -0500
+@@ -578,7 +578,7 @@ static void do_acct_process(struct bsd_a
+ */
+ flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+- file->f_op->write(file, (char *)&ac,
++ file->f_op->write(file, (__force char __user *)&ac,
+ sizeof(acct_t), &file->f_pos);
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
+ set_fs(fs);
+diff -urNp linux-2.6.31.7/kernel/capability.c linux-2.6.31.7/kernel/capability.c
+--- linux-2.6.31.7/kernel/capability.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/capability.c 2009-12-08 17:39:44.263677946 -0500
+@@ -306,10 +306,21 @@ int capable(int cap)
+ BUG();
+ }
+
+- if (security_capable(cap) == 0) {
++ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
+ current->flags |= PF_SUPERPRIV;
+ return 1;
+ }
+ return 0;
+ }
++
++int capable_nolog(int cap)
++{
++ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
++ current->flags |= PF_SUPERPRIV;
++ return 1;
++ }
++ return 0;
++}
++
+ EXPORT_SYMBOL(capable);
++EXPORT_SYMBOL(capable_nolog);
+diff -urNp linux-2.6.31.7/kernel/cgroup.c linux-2.6.31.7/kernel/cgroup.c
+--- linux-2.6.31.7/kernel/cgroup.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/cgroup.c 2009-12-08 17:39:44.270556432 -0500
+@@ -596,8 +596,8 @@ void cgroup_unlock(void)
+ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
+ static int cgroup_populate_dir(struct cgroup *cgrp);
+-static struct inode_operations cgroup_dir_inode_operations;
+-static struct file_operations proc_cgroupstats_operations;
++static const struct inode_operations cgroup_dir_inode_operations;
++static const struct file_operations proc_cgroupstats_operations;
+
+ static struct backing_dev_info cgroup_backing_dev_info = {
+ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
+@@ -960,7 +960,7 @@ static int cgroup_remount(struct super_b
+ return ret;
+ }
+
+-static struct super_operations cgroup_ops = {
++static const struct super_operations cgroup_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+ .show_options = cgroup_show_options,
+@@ -1643,7 +1643,7 @@ static int cgroup_seqfile_release(struct
+ return single_release(inode, file);
+ }
+
+-static struct file_operations cgroup_seqfile_operations = {
++static const struct file_operations cgroup_seqfile_operations = {
+ .read = seq_read,
+ .write = cgroup_file_write,
+ .llseek = seq_lseek,
+@@ -1702,7 +1702,7 @@ static int cgroup_rename(struct inode *o
+ return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
+ }
+
+-static struct file_operations cgroup_file_operations = {
++static const struct file_operations cgroup_file_operations = {
+ .read = cgroup_file_read,
+ .write = cgroup_file_write,
+ .llseek = generic_file_llseek,
+@@ -1710,7 +1710,7 @@ static struct file_operations cgroup_fil
+ .release = cgroup_file_release,
+ };
+
+-static struct inode_operations cgroup_dir_inode_operations = {
++static const struct inode_operations cgroup_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .mkdir = cgroup_mkdir,
+ .rmdir = cgroup_rmdir,
+@@ -2313,7 +2313,7 @@ static int cgroup_tasks_show(struct seq_
+ return seq_printf(s, "%d\n", *(int *)v);
+ }
+
+-static struct seq_operations cgroup_tasks_seq_operations = {
++static const struct seq_operations cgroup_tasks_seq_operations = {
+ .start = cgroup_tasks_start,
+ .stop = cgroup_tasks_stop,
+ .next = cgroup_tasks_next,
+@@ -2350,7 +2350,7 @@ static int cgroup_tasks_release(struct i
+ return seq_release(inode, file);
+ }
+
+-static struct file_operations cgroup_tasks_operations = {
++static const struct file_operations cgroup_tasks_operations = {
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = cgroup_file_write,
+@@ -3016,7 +3016,7 @@ static int cgroup_open(struct inode *ino
+ return single_open(file, proc_cgroup_show, pid);
+ }
+
+-struct file_operations proc_cgroup_operations = {
++const struct file_operations proc_cgroup_operations = {
+ .open = cgroup_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+@@ -3045,7 +3045,7 @@ static int cgroupstats_open(struct inode
+ return single_open(file, proc_cgroupstats_show, NULL);
+ }
+
+-static struct file_operations proc_cgroupstats_operations = {
++static const struct file_operations proc_cgroupstats_operations = {
+ .open = cgroupstats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.31.7/kernel/configs.c linux-2.6.31.7/kernel/configs.c
+--- linux-2.6.31.7/kernel/configs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/configs.c 2009-12-08 17:39:44.270556432 -0500
+@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
+ struct proc_dir_entry *entry;
+
+ /* create the current config file */
++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
++ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
++ &ikconfig_file_ops);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
++ &ikconfig_file_ops);
++#endif
++#else
+ entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
+ &ikconfig_file_ops);
++#endif
++
+ if (!entry)
+ return -ENOMEM;
+
+diff -urNp linux-2.6.31.7/kernel/cpu.c linux-2.6.31.7/kernel/cpu.c
+--- linux-2.6.31.7/kernel/cpu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/cpu.c 2009-12-08 17:39:44.270556432 -0500
+@@ -19,7 +19,7 @@
+ /* Serializes the updates to cpu_online_mask, cpu_present_mask */
+ static DEFINE_MUTEX(cpu_add_remove_lock);
+
+-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
++static RAW_NOTIFIER_HEAD(cpu_chain);
+
+ /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
+ * Should always be manipulated under cpu_add_remove_lock
+diff -urNp linux-2.6.31.7/kernel/cred.c linux-2.6.31.7/kernel/cred.c
+--- linux-2.6.31.7/kernel/cred.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/cred.c 2009-12-08 17:39:44.271811910 -0500
+@@ -366,6 +366,8 @@ int commit_creds(struct cred *new)
+
+ get_cred(new); /* we will require a ref for the subj creds too */
+
++ gr_set_role_label(task, new->uid, new->gid);
++
+ /* dumpability changes */
+ if (old->euid != new->euid ||
+ old->egid != new->egid ||
+diff -urNp linux-2.6.31.7/kernel/exit.c linux-2.6.31.7/kernel/exit.c
+--- linux-2.6.31.7/kernel/exit.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/exit.c 2009-12-08 17:39:44.271811910 -0500
+@@ -56,6 +56,10 @@
+ #include <asm/mmu_context.h>
+ #include "cred-internals.h"
+
++#ifdef CONFIG_GRKERNSEC
++extern rwlock_t grsec_exec_file_lock;
++#endif
++
+ static void exit_mm(struct task_struct * tsk);
+
+ static void __unhash_process(struct task_struct *p)
+@@ -167,6 +171,8 @@ void release_task(struct task_struct * p
+ struct task_struct *leader;
+ int zap_leader;
+ repeat:
++ gr_del_task_from_ip_table(p);
++
+ tracehook_prepare_release_task(p);
+ /* don't need to get the RCU readlock here - the process is dead and
+ * can't be modifying its own credentials */
+@@ -334,11 +340,22 @@ static void reparent_to_kthreadd(void)
+ {
+ write_lock_irq(&tasklist_lock);
+
++#ifdef CONFIG_GRKERNSEC
++ write_lock(&grsec_exec_file_lock);
++ if (current->exec_file) {
++ fput(current->exec_file);
++ current->exec_file = NULL;
++ }
++ write_unlock(&grsec_exec_file_lock);
++#endif
++
+ ptrace_unlink(current);
+ /* Reparent to init */
+ current->real_parent = current->parent = kthreadd_task;
+ list_move_tail(&current->sibling, &current->real_parent->children);
+
++ gr_set_kernel_label(current);
++
+ /* Set the exit signal to SIGCHLD so we signal init on exit */
+ current->exit_signal = SIGCHLD;
+
+@@ -390,7 +407,7 @@ int allow_signal(int sig)
+ * know it'll be handled, so that they don't get converted to
+ * SIGKILL or just silently dropped.
+ */
+- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
++ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ return 0;
+@@ -426,6 +443,17 @@ void daemonize(const char *name, ...)
+ vsnprintf(current->comm, sizeof(current->comm), name, args);
+ va_end(args);
+
++#ifdef CONFIG_GRKERNSEC
++ write_lock(&grsec_exec_file_lock);
++ if (current->exec_file) {
++ fput(current->exec_file);
++ current->exec_file = NULL;
++ }
++ write_unlock(&grsec_exec_file_lock);
++#endif
++
++ gr_set_kernel_label(current);
++
+ /*
+ * If we were started as result of loading a module, close all of the
+ * user space pages. We don't need them, and if we didn't close them
+@@ -953,6 +981,9 @@ NORET_TYPE void do_exit(long code)
+ tsk->exit_code = code;
+ taskstats_exit(tsk, group_dead);
+
++ gr_acl_handle_psacct(tsk, code);
++ gr_acl_handle_exit();
++
+ exit_mm(tsk);
+
+ if (group_dead)
+@@ -1169,7 +1200,7 @@ static int wait_task_zombie(struct wait_
+
+ if (unlikely(wo->wo_flags & WNOWAIT)) {
+ int exit_code = p->exit_code;
+- int why, status;
++ int why;
+
+ get_task_struct(p);
+ read_unlock(&tasklist_lock);
+diff -urNp linux-2.6.31.7/kernel/fork.c linux-2.6.31.7/kernel/fork.c
+--- linux-2.6.31.7/kernel/fork.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/fork.c 2009-12-08 17:39:44.272812001 -0500
+@@ -244,7 +244,7 @@ static struct task_struct *dup_task_stru
+ *stackend = STACK_END_MAGIC; /* for overflow detection */
+
+ #ifdef CONFIG_CC_STACKPROTECTOR
+- tsk->stack_canary = get_random_int();
++ tsk->stack_canary = pax_get_random_long();
+ #endif
+
+ /* One for us, one for whoever does the "release_task()" (usually parent) */
+@@ -281,8 +281,8 @@ static int dup_mmap(struct mm_struct *mm
+ mm->locked_vm = 0;
+ mm->mmap = NULL;
+ mm->mmap_cache = NULL;
+- mm->free_area_cache = oldmm->mmap_base;
+- mm->cached_hole_size = ~0UL;
++ mm->free_area_cache = oldmm->free_area_cache;
++ mm->cached_hole_size = oldmm->cached_hole_size;
+ mm->map_count = 0;
+ cpumask_clear(mm_cpumask(mm));
+ mm->mm_rb = RB_ROOT;
+@@ -319,6 +319,7 @@ static int dup_mmap(struct mm_struct *mm
+ tmp->vm_flags &= ~VM_LOCKED;
+ tmp->vm_mm = mm;
+ tmp->vm_next = NULL;
++ tmp->vm_mirror = NULL;
+ anon_vma_link(tmp);
+ file = tmp->vm_file;
+ if (file) {
+@@ -366,6 +367,31 @@ static int dup_mmap(struct mm_struct *mm
+ if (retval)
+ goto out;
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
++ struct vm_area_struct *mpnt_m;
++
++ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
++ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
++
++ if (!mpnt->vm_mirror)
++ continue;
++
++ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
++ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
++ mpnt->vm_mirror = mpnt_m;
++ } else {
++ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
++ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
++ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
++ mpnt->vm_mirror->vm_mirror = mpnt;
++ }
++ }
++ BUG_ON(mpnt_m);
++ }
++#endif
++
+ /* a new mm has just been created */
+ arch_dup_mmap(oldmm, mm);
+ retval = 0;
+@@ -573,6 +599,7 @@ void mm_release(struct task_struct *tsk,
+ * the value intact in a core dump, and to save the unnecessary
+ * trouble otherwise. Userland only wants this done for a sys_exit.
+ */
++
+ if (tsk->clear_child_tid) {
+ if (!(tsk->flags & PF_SIGNALED) &&
+ atomic_read(&mm->mm_users) > 1) {
+@@ -582,7 +609,7 @@ void mm_release(struct task_struct *tsk,
+ */
+ put_user(0, tsk->clear_child_tid);
+ sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
+- 1, NULL, NULL, 0);
++ 1, NULL, NULL, 0);
+ }
+ tsk->clear_child_tid = NULL;
+ }
+@@ -700,7 +727,7 @@ static int copy_fs(unsigned long clone_f
+ write_unlock(&fs->lock);
+ return -EAGAIN;
+ }
+- fs->users++;
++ atomic_inc(&fs->users);
+ write_unlock(&fs->lock);
+ return 0;
+ }
+@@ -983,6 +1010,9 @@ static struct task_struct *copy_process(
+ DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
+ #endif
+ retval = -EAGAIN;
++
++ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
++
+ if (atomic_read(&p->real_cred->user->processes) >=
+ p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
+@@ -1139,6 +1169,8 @@ static struct task_struct *copy_process(
+ goto bad_fork_free_pid;
+ }
+
++ gr_copy_label(p);
++
+ p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+ /*
+ * Clear TID on mm_release()?
+@@ -1308,6 +1340,8 @@ bad_fork_cleanup_count:
+ bad_fork_free:
+ free_task(p);
+ fork_out:
++ gr_log_forkfail(retval);
++
+ return ERR_PTR(retval);
+ }
+
+@@ -1401,6 +1435,8 @@ long do_fork(unsigned long clone_flags,
+ if (clone_flags & CLONE_PARENT_SETTID)
+ put_user(nr, parent_tidptr);
+
++ gr_handle_brute_check();
++
+ if (clone_flags & CLONE_VFORK) {
+ p->vfork_done = &vfork;
+ init_completion(&vfork);
+@@ -1533,7 +1569,7 @@ static int unshare_fs(unsigned long unsh
+ return 0;
+
+ /* don't need lock here; in the worst case we'll do useless copy */
+- if (fs->users == 1)
++ if (atomic_read(&fs->users) == 1)
+ return 0;
+
+ *new_fsp = copy_fs_struct(fs);
+@@ -1656,7 +1692,7 @@ SYSCALL_DEFINE1(unshare, unsigned long,
+ fs = current->fs;
+ write_lock(&fs->lock);
+ current->fs = new_fs;
+- if (--fs->users)
++ if (atomic_dec_return(&fs->users))
+ new_fs = NULL;
+ else
+ new_fs = fs;
+diff -urNp linux-2.6.31.7/kernel/futex.c linux-2.6.31.7/kernel/futex.c
+--- linux-2.6.31.7/kernel/futex.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/futex.c 2009-12-08 17:39:44.272812001 -0500
+@@ -54,6 +54,7 @@
+ #include <linux/mount.h>
+ #include <linux/pagemap.h>
+ #include <linux/syscalls.h>
++#include <linux/ptrace.h>
+ #include <linux/signal.h>
+ #include <linux/module.h>
+ #include <linux/magic.h>
+@@ -222,6 +223,11 @@ get_futex_key(u32 __user *uaddr, int fsh
+ struct page *page;
+ int err;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
++ return -EFAULT;
++#endif
++
+ /*
+ * The futex address must be "naturally" aligned.
+ */
+@@ -1801,7 +1807,7 @@ retry:
+
+ restart = &current_thread_info()->restart_block;
+ restart->fn = futex_wait_restart;
+- restart->futex.uaddr = (u32 *)uaddr;
++ restart->futex.uaddr = uaddr;
+ restart->futex.val = val;
+ restart->futex.time = abs_time->tv64;
+ restart->futex.bitset = bitset;
+@@ -2337,7 +2343,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+ {
+ struct robust_list_head __user *head;
+ unsigned long ret;
+- const struct cred *cred = current_cred(), *pcred;
++ const struct cred *cred = current_cred();
++#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
++ const struct cred *pcred;
++#endif
+
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+@@ -2353,11 +2362,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+ if (!p)
+ goto err_unlock;
+ ret = -EPERM;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (!ptrace_may_access(p, PTRACE_MODE_READ))
++ goto err_unlock;
++#else
+ pcred = __task_cred(p);
+ if (cred->euid != pcred->euid &&
+ cred->euid != pcred->uid &&
+ !capable(CAP_SYS_PTRACE))
+ goto err_unlock;
++#endif
+ head = p->robust_list;
+ rcu_read_unlock();
+ }
+@@ -2419,7 +2433,7 @@ retry:
+ */
+ static inline int fetch_robust_entry(struct robust_list __user **entry,
+ struct robust_list __user * __user *head,
+- int *pi)
++ unsigned int *pi)
+ {
+ unsigned long uentry;
+
+diff -urNp linux-2.6.31.7/kernel/futex_compat.c linux-2.6.31.7/kernel/futex_compat.c
+--- linux-2.6.31.7/kernel/futex_compat.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/futex_compat.c 2009-12-08 17:39:44.272812001 -0500
+@@ -10,6 +10,7 @@
+ #include <linux/compat.h>
+ #include <linux/nsproxy.h>
+ #include <linux/futex.h>
++#include <linux/ptrace.h>
+
+ #include <asm/uaccess.h>
+
+@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
+ {
+ struct compat_robust_list_head __user *head;
+ unsigned long ret;
+- const struct cred *cred = current_cred(), *pcred;
++ const struct cred *cred = current_cred();
++#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
++ const struct cred *pcred;
++#endif
+
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
+ if (!p)
+ goto err_unlock;
+ ret = -EPERM;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (!ptrace_may_access(p, PTRACE_MODE_READ))
++ goto err_unlock;
++#else
+ pcred = __task_cred(p);
+ if (cred->euid != pcred->euid &&
+ cred->euid != pcred->uid &&
+ !capable(CAP_SYS_PTRACE))
+ goto err_unlock;
++#endif
+ head = p->compat_robust_list;
+ read_unlock(&tasklist_lock);
+ }
+diff -urNp linux-2.6.31.7/kernel/gcov/base.c linux-2.6.31.7/kernel/gcov/base.c
+--- linux-2.6.31.7/kernel/gcov/base.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/gcov/base.c 2009-12-08 17:39:44.273812341 -0500
+@@ -102,11 +102,6 @@ void gcov_enable_events(void)
+ }
+
+ #ifdef CONFIG_MODULES
+-static inline int within(void *addr, void *start, unsigned long size)
+-{
+- return ((addr >= start) && (addr < start + size));
+-}
+-
+ /* Update list and generate events when modules are unloaded. */
+ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
+ prev = NULL;
+ /* Remove entries located in module from linked list. */
+ for (info = gcov_info_head; info; info = info->next) {
+- if (within(info, mod->module_core, mod->core_size)) {
++ if (within_module_core_rw((unsigned long)info, mod)) {
+ if (prev)
+ prev->next = info->next;
+ else
+diff -urNp linux-2.6.31.7/kernel/kallsyms.c linux-2.6.31.7/kernel/kallsyms.c
+--- linux-2.6.31.7/kernel/kallsyms.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/kallsyms.c 2009-12-08 17:39:44.273812341 -0500
+@@ -11,6 +11,9 @@
+ * Changed the compression method from stem compression to "table lookup"
+ * compression (see scripts/kallsyms.c for a more complete description)
+ */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kallsyms.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+@@ -51,6 +54,9 @@ extern const unsigned long kallsyms_mark
+
+ static inline int is_kernel_inittext(unsigned long addr)
+ {
++ if (system_state != SYSTEM_BOOTING)
++ return 0;
++
+ if (addr >= (unsigned long)_sinittext
+ && addr <= (unsigned long)_einittext)
+ return 1;
+@@ -66,6 +72,9 @@ static inline int is_kernel_text(unsigne
+
+ static inline int is_kernel(unsigned long addr)
+ {
++ if (is_kernel_inittext(addr))
++ return 1;
++
+ if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
+ return 1;
+ return in_gate_area_no_task(addr);
+@@ -412,7 +421,6 @@ static unsigned long get_ksymbol_core(st
+
+ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
+ {
+- iter->name[0] = '\0';
+ iter->nameoff = get_symbol_offset(new_pos);
+ iter->pos = new_pos;
+ }
+@@ -460,6 +468,11 @@ static int s_show(struct seq_file *m, vo
+ {
+ struct kallsym_iter *iter = m->private;
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ if (current_uid())
++ return 0;
++#endif
++
+ /* Some debugging symbols have no name. Ignore them. */
+ if (!iter->name[0])
+ return 0;
+@@ -500,7 +513,7 @@ static int kallsyms_open(struct inode *i
+ struct kallsym_iter *iter;
+ int ret;
+
+- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
++ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+ reset_iter(iter, 0);
+@@ -523,6 +536,7 @@ static const struct file_operations kall
+ static int __init kallsyms_init(void)
+ {
+ proc_create("kallsyms", 0444, NULL, &kallsyms_operations);
++
+ return 0;
+ }
+ device_initcall(kallsyms_init);
+diff -urNp linux-2.6.31.7/kernel/kgdb.c linux-2.6.31.7/kernel/kgdb.c
+--- linux-2.6.31.7/kernel/kgdb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/kgdb.c 2009-12-08 17:39:44.274812592 -0500
+@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
+ /* Guard for recursive entry */
+ static int exception_level;
+
+-static struct kgdb_io *kgdb_io_ops;
++static const struct kgdb_io *kgdb_io_ops;
+ static DEFINE_SPINLOCK(kgdb_registration_lock);
+
+ /* kgdb console driver is loaded */
+@@ -1637,7 +1637,7 @@ static void kgdb_initial_breakpoint(void
+ *
+ * Register it with the KGDB core.
+ */
+-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
++int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
+ {
+ int err;
+
+@@ -1682,7 +1682,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
+ *
+ * Unregister it with the KGDB core.
+ */
+-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
++void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
+ {
+ BUG_ON(kgdb_connected);
+
+diff -urNp linux-2.6.31.7/kernel/kmod.c linux-2.6.31.7/kernel/kmod.c
+--- linux-2.6.31.7/kernel/kmod.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/kmod.c 2009-12-08 17:39:44.274812592 -0500
+@@ -84,6 +84,18 @@ int __request_module(bool wait, const ch
+ if (ret >= MODULE_NAME_LEN)
+ return -ENAMETOOLONG;
+
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ /* we could do a tighter check here, but some distros
++ are taking it upon themselves to remove CAP_SYS_MODULE
++ from even root-running apps which cause modules to be
++ auto-loaded
++ */
++ if (current_uid()) {
++ gr_log_nonroot_mod_load(module_name);
++ return -EPERM;
++ }
++#endif
++
+ /* If modprobe needs a service that is in a module, we get a recursive
+ * loop. Limit the number of running kmod threads to max_threads/2 or
+ * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
+diff -urNp linux-2.6.31.7/kernel/kprobes.c linux-2.6.31.7/kernel/kprobes.c
+--- linux-2.6.31.7/kernel/kprobes.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/kprobes.c 2009-12-08 17:39:44.275810711 -0500
+@@ -184,7 +184,7 @@ static kprobe_opcode_t __kprobes *__get_
+ * kernel image and loaded module images reside. This is required
+ * so x86_64 can correctly handle the %rip-relative fixups.
+ */
+- kip->insns = module_alloc(PAGE_SIZE);
++ kip->insns = module_alloc_exec(PAGE_SIZE);
+ if (!kip->insns) {
+ kfree(kip);
+ return NULL;
+@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
+ hlist_add_head(&kip->hlist,
+ &kprobe_insn_pages);
+ } else {
+- module_free(NULL, kip->insns);
++ module_free_exec(NULL, kip->insns);
+ kfree(kip);
+ }
+ return 1;
+@@ -1329,7 +1329,7 @@ static int __kprobes show_kprobe_addr(st
+ return 0;
+ }
+
+-static struct seq_operations kprobes_seq_ops = {
++static const struct seq_operations kprobes_seq_ops = {
+ .start = kprobe_seq_start,
+ .next = kprobe_seq_next,
+ .stop = kprobe_seq_stop,
+@@ -1341,7 +1341,7 @@ static int __kprobes kprobes_open(struct
+ return seq_open(filp, &kprobes_seq_ops);
+ }
+
+-static struct file_operations debugfs_kprobes_operations = {
++static const struct file_operations debugfs_kprobes_operations = {
+ .open = kprobes_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+@@ -1523,7 +1523,7 @@ static ssize_t write_enabled_file_bool(s
+ return count;
+ }
+
+-static struct file_operations fops_kp = {
++static const struct file_operations fops_kp = {
+ .read = read_enabled_file_bool,
+ .write = write_enabled_file_bool,
+ };
+diff -urNp linux-2.6.31.7/kernel/lockdep.c linux-2.6.31.7/kernel/lockdep.c
+--- linux-2.6.31.7/kernel/lockdep.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/lockdep.c 2009-12-08 17:39:44.275810711 -0500
+@@ -630,6 +630,10 @@ static int static_obj(void *obj)
+ int i;
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++ start = ktla_ktva(start);
++#endif
++
+ /*
+ * static variable?
+ */
+@@ -642,8 +646,7 @@ static int static_obj(void *obj)
+ */
+ for_each_possible_cpu(i) {
+ start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
+- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
+- + per_cpu_offset(i);
++ end = start + PERCPU_ENOUGH_ROOM;
+
+ if ((addr >= start) && (addr < end))
+ return 1;
+@@ -760,6 +763,7 @@ register_lock_class(struct lockdep_map *
+ if (!static_obj(lock->key)) {
+ debug_locks_off();
+ printk("INFO: trying to register non-static key.\n");
++ printk("lock:%pS key:%pS.\n", lock, lock->key);
+ printk("the code is fine but needs lockdep annotation.\n");
+ printk("turning off the locking correctness validator.\n");
+ dump_stack();
+diff -urNp linux-2.6.31.7/kernel/lockdep_proc.c linux-2.6.31.7/kernel/lockdep_proc.c
+--- linux-2.6.31.7/kernel/lockdep_proc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/lockdep_proc.c 2009-12-08 17:39:44.276817709 -0500
+@@ -670,7 +670,7 @@ static int ls_show(struct seq_file *m, v
+ return 0;
+ }
+
+-static struct seq_operations lockstat_ops = {
++static const struct seq_operations lockstat_ops = {
+ .start = ls_start,
+ .next = ls_next,
+ .stop = ls_stop,
+diff -urNp linux-2.6.31.7/kernel/module.c linux-2.6.31.7/kernel/module.c
+--- linux-2.6.31.7/kernel/module.c 2009-12-08 17:29:51.636696310 -0500
++++ linux-2.6.31.7/kernel/module.c 2009-12-08 17:39:44.277660987 -0500
+@@ -83,7 +83,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
+ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
+
+ /* Bounds of module allocation, for speeding __module_address */
+-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
++static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
++static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
+
+ int register_module_notifier(struct notifier_block * nb)
+ {
+@@ -239,7 +240,7 @@ bool each_symbol(bool (*fn)(const struct
+ return true;
+
+ list_for_each_entry_rcu(mod, &modules, list) {
+- struct symsearch arr[] = {
++ struct symsearch modarr[] = {
+ { mod->syms, mod->syms + mod->num_syms, mod->crcs,
+ NOT_GPL_ONLY, false },
+ { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
+@@ -261,7 +262,7 @@ bool each_symbol(bool (*fn)(const struct
+ #endif
+ };
+
+- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
++ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
+ return true;
+ }
+ return false;
+@@ -436,7 +437,7 @@ static void *percpu_modalloc(unsigned lo
+ void *ptr;
+ int cpu;
+
+- if (align > PAGE_SIZE) {
++ if (align-1 >= PAGE_SIZE) {
+ printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
+ name, align, PAGE_SIZE);
+ align = PAGE_SIZE;
+@@ -549,7 +550,11 @@ static void percpu_modcopy(void *pcpudes
+ int cpu;
+
+ for_each_possible_cpu(cpu)
++#ifdef CONFIG_X86_32
++ memcpy(pcpudest + __per_cpu_offset[cpu], from, size);
++#else
+ memcpy(pcpudest + per_cpu_offset(cpu), from, size);
++#endif
+ }
+
+ #else /* ... !CONFIG_SMP */
+@@ -1516,7 +1521,8 @@ static void free_module(struct module *m
+ destroy_params(mod->kp, mod->num_kp);
+
+ /* This may be NULL, but that's OK */
+- module_free(mod, mod->module_init);
++ module_free(mod, mod->module_init_rw);
++ module_free_exec(mod, mod->module_init_rx);
+ kfree(mod->args);
+ if (mod->percpu)
+ percpu_modfree(mod->percpu);
+@@ -1525,10 +1531,12 @@ static void free_module(struct module *m
+ percpu_modfree(mod->refptr);
+ #endif
+ /* Free lock-classes: */
+- lockdep_free_key_range(mod->module_core, mod->core_size);
++ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
++ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
+
+ /* Finally, free the core (containing the module structure) */
+- module_free(mod, mod->module_core);
++ module_free_exec(mod, mod->module_core_rx);
++ module_free(mod, mod->module_core_rw);
+ }
+
+ void *__symbol_get(const char *symbol)
+@@ -1618,7 +1626,9 @@ static int simplify_symbols(Elf_Shdr *se
+ strtab + sym[i].st_name, mod);
+ /* Ok if resolved. */
+ if (ksym) {
++ pax_open_kernel();
+ sym[i].st_value = ksym->value;
++ pax_close_kernel();
+ break;
+ }
+
+@@ -1637,7 +1647,9 @@ static int simplify_symbols(Elf_Shdr *se
+ secbase = (unsigned long)mod->percpu;
+ else
+ secbase = sechdrs[sym[i].st_shndx].sh_addr;
++ pax_open_kernel();
+ sym[i].st_value += secbase;
++ pax_close_kernel();
+ break;
+ }
+ }
+@@ -1698,11 +1710,12 @@ static void layout_sections(struct modul
+ || s->sh_entsize != ~0UL
+ || strstarts(secstrings + s->sh_name, ".init"))
+ continue;
+- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
++ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
++ else
++ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
+ DEBUGP("\t%s\n", secstrings + s->sh_name);
+ }
+- if (m == 0)
+- mod->core_text_size = mod->core_size;
+ }
+
+ DEBUGP("Init section allocation order:\n");
+@@ -1715,12 +1728,13 @@ static void layout_sections(struct modul
+ || s->sh_entsize != ~0UL
+ || !strstarts(secstrings + s->sh_name, ".init"))
+ continue;
+- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
+- | INIT_OFFSET_MASK);
++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
++ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
++ else
++ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
++ s->sh_entsize |= INIT_OFFSET_MASK;
+ DEBUGP("\t%s\n", secstrings + s->sh_name);
+ }
+- if (m == 0)
+- mod->init_text_size = mod->init_size;
+ }
+ }
+
+@@ -1864,9 +1878,14 @@ static void add_kallsyms(struct module *
+ mod->strtab = (void *)sechdrs[strindex].sh_addr;
+
+ /* Set types up while we still have access to sections. */
+- for (i = 0; i < mod->num_symtab; i++)
+- mod->symtab[i].st_info
+- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
++
++ for (i = 0; i < mod->num_symtab; i++) {
++ char type = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
++ pax_open_kernel();
++ mod->symtab[i].st_info = type;
++ pax_close_kernel();
++ }
++
+ }
+ #else
+ static inline void add_kallsyms(struct module *mod,
+@@ -1887,16 +1906,30 @@ static void dynamic_debug_setup(struct _
+ #endif
+ }
+
+-static void *module_alloc_update_bounds(unsigned long size)
++static void *module_alloc_update_bounds_rw(unsigned long size)
+ {
+ void *ret = module_alloc(size);
+
+ if (ret) {
+ /* Update module bounds. */
+- if ((unsigned long)ret < module_addr_min)
+- module_addr_min = (unsigned long)ret;
+- if ((unsigned long)ret + size > module_addr_max)
+- module_addr_max = (unsigned long)ret + size;
++ if ((unsigned long)ret < module_addr_min_rw)
++ module_addr_min_rw = (unsigned long)ret;
++ if ((unsigned long)ret + size > module_addr_max_rw)
++ module_addr_max_rw = (unsigned long)ret + size;
++ }
++ return ret;
++}
++
++static void *module_alloc_update_bounds_rx(unsigned long size)
++{
++ void *ret = module_alloc_exec(size);
++
++ if (ret) {
++ /* Update module bounds. */
++ if ((unsigned long)ret < module_addr_min_rx)
++ module_addr_min_rx = (unsigned long)ret;
++ if ((unsigned long)ret + size > module_addr_max_rx)
++ module_addr_max_rx = (unsigned long)ret + size;
+ }
+ return ret;
+ }
+@@ -1908,8 +1941,8 @@ static void kmemleak_load_module(struct
+ unsigned int i;
+
+ /* only scan the sections containing data */
+- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
+- (unsigned long)mod->module_core,
++ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
++ (unsigned long)mod->module_core_rw,
+ sizeof(struct module), GFP_KERNEL);
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+@@ -1919,8 +1952,8 @@ static void kmemleak_load_module(struct
+ && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
+ continue;
+
+- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
+- (unsigned long)mod->module_core,
++ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
++ (unsigned long)mod->module_core_rw,
+ sechdrs[i].sh_size, GFP_KERNEL);
+ }
+ }
+@@ -2100,7 +2133,7 @@ static noinline struct module *load_modu
+ layout_sections(mod, hdr, sechdrs, secstrings);
+
+ /* Do the allocs. */
+- ptr = module_alloc_update_bounds(mod->core_size);
++ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
+ /*
+ * The pointer to this block is stored in the module structure
+ * which is inside the block. Just mark it as not being a
+@@ -2111,23 +2144,47 @@ static noinline struct module *load_modu
+ err = -ENOMEM;
+ goto free_percpu;
+ }
+- memset(ptr, 0, mod->core_size);
+- mod->module_core = ptr;
++ memset(ptr, 0, mod->core_size_rw);
++ mod->module_core_rw = ptr;
+
+- ptr = module_alloc_update_bounds(mod->init_size);
++ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
+ /*
+ * The pointer to this block is stored in the module structure
+ * which is inside the block. This block doesn't need to be
+ * scanned as it contains data and code that will be freed
+ * after the module is initialized.
+ */
+- kmemleak_ignore(ptr);
+- if (!ptr && mod->init_size) {
++ kmemleak_not_leak(ptr);
++ if (!ptr && mod->init_size_rw) {
+ err = -ENOMEM;
+- goto free_core;
++ goto free_core_rw;
+ }
+- memset(ptr, 0, mod->init_size);
+- mod->module_init = ptr;
++ memset(ptr, 0, mod->init_size_rw);
++ mod->module_init_rw = ptr;
++
++ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
++ kmemleak_not_leak(ptr);
++ if (!ptr) {
++ err = -ENOMEM;
++ goto free_init_rw;
++ }
++
++ pax_open_kernel();
++ memset(ptr, 0, mod->core_size_rx);
++ pax_close_kernel();
++ mod->module_core_rx = ptr;
++
++ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
++ kmemleak_not_leak(ptr);
++ if (!ptr && mod->init_size_rx) {
++ err = -ENOMEM;
++ goto free_core_rx;
++ }
++
++ pax_open_kernel();
++ memset(ptr, 0, mod->init_size_rx);
++ pax_close_kernel();
++ mod->module_init_rx = ptr;
+
+ /* Transfer each section which specifies SHF_ALLOC */
+ DEBUGP("final section addresses:\n");
+@@ -2137,17 +2194,41 @@ static noinline struct module *load_modu
+ if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+ continue;
+
+- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
+- dest = mod->module_init
+- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
+- else
+- dest = mod->module_core + sechdrs[i].sh_entsize;
++ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
++ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
++ dest = mod->module_init_rw
++ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
++ else
++ dest = mod->module_init_rx
++ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
++ } else {
++ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
++ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
++ else
++ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
++ }
++
++ if (sechdrs[i].sh_type != SHT_NOBITS) {
+
+- if (sechdrs[i].sh_type != SHT_NOBITS)
+- memcpy(dest, (void *)sechdrs[i].sh_addr,
+- sechdrs[i].sh_size);
++#ifdef CONFIG_PAX_KERNEXEC
++ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
++ pax_open_kernel();
++ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
++ pax_close_kernel();
++ } else
++#endif
++
++ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
++ }
+ /* Update sh_addr to point to copy in image. */
+- sechdrs[i].sh_addr = (unsigned long)dest;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
++ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
++ else
++#endif
++
++ sechdrs[i].sh_addr = (unsigned long)dest;
+ DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
+ }
+ /* Module has been moved. */
+@@ -2159,7 +2240,7 @@ static noinline struct module *load_modu
+ mod->name);
+ if (!mod->refptr) {
+ err = -ENOMEM;
+- goto free_init;
++ goto free_init_rx;
+ }
+ #endif
+ /* Now we've moved module, initialize linked lists, etc. */
+@@ -2272,8 +2353,8 @@ static noinline struct module *load_modu
+
+ /* Now do relocations. */
+ for (i = 1; i < hdr->e_shnum; i++) {
+- const char *strtab = (char *)sechdrs[strindex].sh_addr;
+ unsigned int info = sechdrs[i].sh_info;
++ strtab = (char *)sechdrs[strindex].sh_addr;
+
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+@@ -2331,12 +2412,12 @@ static noinline struct module *load_modu
+ * Do it before processing of module parameters, so the module
+ * can provide parameter accessor functions of its own.
+ */
+- if (mod->module_init)
+- flush_icache_range((unsigned long)mod->module_init,
+- (unsigned long)mod->module_init
+- + mod->init_size);
+- flush_icache_range((unsigned long)mod->module_core,
+- (unsigned long)mod->module_core + mod->core_size);
++ if (mod->module_init_rx)
++ flush_icache_range((unsigned long)mod->module_init_rx,
++ (unsigned long)mod->module_init_rx
++ + mod->init_size_rx);
++ flush_icache_range((unsigned long)mod->module_core_rx,
++ (unsigned long)mod->module_core_rx + mod->core_size_rx);
+
+ set_fs(old_fs);
+
+@@ -2381,12 +2462,16 @@ static noinline struct module *load_modu
+ free_unload:
+ module_unload_free(mod);
+ #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+- free_init:
++ free_init_rx:
+ percpu_modfree(mod->refptr);
+ #endif
+- module_free(mod, mod->module_init);
+- free_core:
+- module_free(mod, mod->module_core);
++ module_free_exec(mod, mod->module_init_rx);
++ free_core_rx:
++ module_free_exec(mod, mod->module_core_rx);
++ free_init_rw:
++ module_free(mod, mod->module_init_rw);
++ free_core_rw:
++ module_free(mod, mod->module_core_rw);
+ /* mod will be freed with core. Don't access it beyond this line! */
+ free_percpu:
+ if (percpu)
+@@ -2482,10 +2567,12 @@ SYSCALL_DEFINE3(init_module, void __user
+ /* Drop initial reference. */
+ module_put(mod);
+ trim_init_extable(mod);
+- module_free(mod, mod->module_init);
+- mod->module_init = NULL;
+- mod->init_size = 0;
+- mod->init_text_size = 0;
++ module_free(mod, mod->module_init_rw);
++ module_free_exec(mod, mod->module_init_rx);
++ mod->module_init_rw = NULL;
++ mod->module_init_rx = NULL;
++ mod->init_size_rw = 0;
++ mod->init_size_rx = 0;
+ mutex_unlock(&module_mutex);
+
+ return 0;
+@@ -2516,10 +2603,16 @@ static const char *get_ksymbol(struct mo
+ unsigned long nextval;
+
+ /* At worse, next value is at end of module */
+- if (within_module_init(addr, mod))
+- nextval = (unsigned long)mod->module_init+mod->init_text_size;
++ if (within_module_init_rx(addr, mod))
++ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
++ else if (within_module_init_rw(addr, mod))
++ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
++ else if (within_module_core_rx(addr, mod))
++ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
++ else if (within_module_core_rw(addr, mod))
++ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
+ else
+- nextval = (unsigned long)mod->module_core+mod->core_text_size;
++ return NULL;
+
+ /* Scan for closest preceeding symbol, and next symbol. (ELF
+ starts real symbols at 1). */
+@@ -2765,7 +2858,7 @@ static int m_show(struct seq_file *m, vo
+ char buf[8];
+
+ seq_printf(m, "%s %u",
+- mod->name, mod->init_size + mod->core_size);
++ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
+ print_unload_info(m, mod);
+
+ /* Informative for users. */
+@@ -2774,7 +2867,7 @@ static int m_show(struct seq_file *m, vo
+ mod->state == MODULE_STATE_COMING ? "Loading":
+ "Live");
+ /* Used by oprofile and other similar tools. */
+- seq_printf(m, " 0x%p", mod->module_core);
++ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
+
+ /* Taints info */
+ if (mod->taints)
+@@ -2810,7 +2903,17 @@ static const struct file_operations proc
+
+ static int __init proc_modules_init(void)
+ {
++#ifndef CONFIG_GRKERNSEC_HIDESYM
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
++#else
+ proc_create("modules", 0, NULL, &proc_modules_operations);
++#endif
++#else
++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
++#endif
+ return 0;
+ }
+ module_init(proc_modules_init);
+@@ -2869,12 +2972,12 @@ struct module *__module_address(unsigned
+ {
+ struct module *mod;
+
+- if (addr < module_addr_min || addr > module_addr_max)
++ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
++ (addr < module_addr_min_rw || addr > module_addr_max_rw))
+ return NULL;
+
+ list_for_each_entry_rcu(mod, &modules, list)
+- if (within_module_core(addr, mod)
+- || within_module_init(addr, mod))
++ if (within_module_init(addr, mod) || within_module_core(addr, mod))
+ return mod;
+ return NULL;
+ }
+@@ -2908,11 +3011,20 @@ bool is_module_text_address(unsigned lon
+ */
+ struct module *__module_text_address(unsigned long addr)
+ {
+- struct module *mod = __module_address(addr);
++ struct module *mod;
++
++#ifdef CONFIG_X86_32
++ addr = ktla_ktva(addr);
++#endif
++
++ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
++ return NULL;
++
++ mod = __module_address(addr);
++
+ if (mod) {
+ /* Make sure it's within the text section. */
+- if (!within(addr, mod->module_init, mod->init_text_size)
+- && !within(addr, mod->module_core, mod->core_text_size))
++ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
+ mod = NULL;
+ }
+ return mod;
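
The module.c hunks above split each module's memory into separate rx (executable) and rw (writable) regions, module_core_rx/module_core_rw and their init counterparts, so the executable half can be protected independently of module data; the lookup helpers and the /proc/modules output are adjusted to cover both regions. A minimal userspace sketch of the resulting bounds checks, using a simplified stand-in rather than the kernel's real struct module:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the split layout used in the patch above:
 * executable (rx) and writable (rw) module memory are tracked as two
 * separate regions instead of one core/init pair. */
struct mod_layout {
    unsigned long core_rx, core_size_rx;
    unsigned long core_rw, core_size_rw;
};

static bool within(unsigned long addr, unsigned long base, unsigned long size)
{
    return addr >= base && addr - base < size;
}

static bool within_module_core_rx(unsigned long addr, const struct mod_layout *m)
{
    return within(addr, m->core_rx, m->core_size_rx);
}

static bool within_module_core_rw(unsigned long addr, const struct mod_layout *m)
{
    return within(addr, m->core_rw, m->core_size_rw);
}

int main(void)
{
    struct mod_layout m = { 0x1000, 0x800, 0x4000, 0x400 };

    printf("0x1200 in rx text: %d\n", within_module_core_rx(0x1200, &m)); /* 1 */
    printf("0x1200 in rw data: %d\n", within_module_core_rw(0x1200, &m)); /* 0 */
    printf("0x4100 in rw data: %d\n", within_module_core_rw(0x4100, &m)); /* 1 */
    return 0;
}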
+diff -urNp linux-2.6.31.7/kernel/panic.c linux-2.6.31.7/kernel/panic.c
+--- linux-2.6.31.7/kernel/panic.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/panic.c 2009-12-08 17:39:44.277660987 -0500
+@@ -391,7 +391,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
+ */
+ void __stack_chk_fail(void)
+ {
+- panic("stack-protector: Kernel stack is corrupted in: %p\n",
++ dump_stack();
++ panic("stack-protector: Kernel stack is corrupted in: %pS\n",
+ __builtin_return_address(0));
+ }
+ EXPORT_SYMBOL(__stack_chk_fail);
+diff -urNp linux-2.6.31.7/kernel/params.c linux-2.6.31.7/kernel/params.c
+--- linux-2.6.31.7/kernel/params.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/params.c 2009-12-08 17:39:44.277660987 -0500
+@@ -724,7 +724,7 @@ static ssize_t module_attr_store(struct
+ return ret;
+ }
+
+-static struct sysfs_ops module_sysfs_ops = {
++static const struct sysfs_ops module_sysfs_ops = {
+ .show = module_attr_show,
+ .store = module_attr_store,
+ };
+@@ -738,7 +738,7 @@ static int uevent_filter(struct kset *ks
+ return 0;
+ }
+
+-static struct kset_uevent_ops module_uevent_ops = {
++static const struct kset_uevent_ops module_uevent_ops = {
+ .filter = uevent_filter,
+ };
+
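
The params.c change is one instance of a pattern repeated throughout this patch (perf_counter.c, the RCU trace files, relay.c, sched.c, the timer debug files, ftrace/trace, filemap.c and others): ops tables that are never modified are declared const so the linker can place them in read-only memory and their function pointers cannot be overwritten at run time. A small compilable sketch with an invented struct name, not the kernel's sysfs_ops:

#include <stdio.h>

struct sysfs_ops_like {
    int (*show)(const char *name);
    int (*store)(const char *name, const char *val);
};

static int my_show(const char *name) { printf("show %s\n", name); return 0; }
static int my_store(const char *name, const char *val)
{
    printf("store %s=%s\n", name, val);
    return 0;
}

/* const lets the ops table live in a read-only section. */
static const struct sysfs_ops_like my_ops = {
    .show  = my_show,
    .store = my_store,
};

int main(void)
{
    my_ops.show("demo");
    my_ops.store("demo", "1");
    return 0;
}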
+diff -urNp linux-2.6.31.7/kernel/perf_counter.c linux-2.6.31.7/kernel/perf_counter.c
+--- linux-2.6.31.7/kernel/perf_counter.c 2009-12-08 17:29:51.637692433 -0500
++++ linux-2.6.31.7/kernel/perf_counter.c 2009-12-08 17:39:44.278819033 -0500
+@@ -2231,7 +2231,7 @@ static void perf_mmap_close(struct vm_ar
+ }
+ }
+
+-static struct vm_operations_struct perf_mmap_vmops = {
++static const struct vm_operations_struct perf_mmap_vmops = {
+ .open = perf_mmap_open,
+ .close = perf_mmap_close,
+ .fault = perf_mmap_fault,
+diff -urNp linux-2.6.31.7/kernel/pid.c linux-2.6.31.7/kernel/pid.c
+--- linux-2.6.31.7/kernel/pid.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/pid.c 2009-12-08 17:39:44.279661702 -0500
+@@ -33,6 +33,7 @@
+ #include <linux/rculist.h>
+ #include <linux/bootmem.h>
+ #include <linux/hash.h>
++#include <linux/security.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/init_task.h>
+ #include <linux/syscalls.h>
+@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
+
+ int pid_max = PID_MAX_DEFAULT;
+
+-#define RESERVED_PIDS 300
++#define RESERVED_PIDS 500
+
+ int pid_max_min = RESERVED_PIDS + 1;
+ int pid_max_max = PID_MAX_LIMIT;
+@@ -380,7 +381,14 @@ EXPORT_SYMBOL(pid_task);
+ */
+ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
+ {
+- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
++ struct task_struct *task;
++
++ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
++
++ if (gr_pid_is_chrooted(task))
++ return NULL;
++
++ return task;
+ }
+
+ struct task_struct *find_task_by_vpid(pid_t vnr)
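
The pid.c hunk raises RESERVED_PIDS (which feeds the lower bound of the pid_max sysctl) and wraps find_task_by_pid_ns() so its result is filtered through gr_pid_is_chrooted(), hiding tasks that, per grsecurity policy, a chrooted caller should not see. A toy model of that wrap-the-lookup-with-a-policy-filter pattern, with placeholder names rather than the grsecurity implementation:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int pid; bool outside_callers_chroot; };

static struct task tasks[] = { {1, false}, {42, true}, {100, false} };

static struct task *raw_find(int pid)
{
    for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
        if (tasks[i].pid == pid)
            return &tasks[i];
    return NULL;
}

static bool policy_denies(const struct task *t)
{
    return t && t->outside_callers_chroot;  /* stand-in for gr_pid_is_chrooted() */
}

static struct task *find_task(int pid)
{
    struct task *t = raw_find(pid);
    return policy_denies(t) ? NULL : t;
}

int main(void)
{
    printf("pid 1:  %s\n", find_task(1)  ? "visible" : "hidden");
    printf("pid 42: %s\n", find_task(42) ? "visible" : "hidden");
    return 0;
}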
+diff -urNp linux-2.6.31.7/kernel/posix-cpu-timers.c linux-2.6.31.7/kernel/posix-cpu-timers.c
+--- linux-2.6.31.7/kernel/posix-cpu-timers.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/posix-cpu-timers.c 2009-12-08 17:39:44.279661702 -0500
+@@ -6,6 +6,7 @@
+ #include <linux/posix-timers.h>
+ #include <linux/errno.h>
+ #include <linux/math64.h>
++#include <linux/security.h>
+ #include <asm/uaccess.h>
+ #include <linux/kernel_stat.h>
+
+@@ -1041,6 +1042,7 @@ static void check_thread_timers(struct t
+ __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+ return;
+ }
++ gr_learn_resource(tsk, RLIMIT_RTTIME, tsk->rt.timeout, 1);
+ if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+ /*
+ * At the soft limit, send a SIGXCPU every second.
+@@ -1196,6 +1198,7 @@ static void check_process_timers(struct
+ __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+ return;
+ }
++ gr_learn_resource(tsk, RLIMIT_CPU, psecs, 0);
+ if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
+ /*
+ * At the soft limit, send a SIGXCPU every second.
+diff -urNp linux-2.6.31.7/kernel/power/hibernate.c linux-2.6.31.7/kernel/power/hibernate.c
+--- linux-2.6.31.7/kernel/power/hibernate.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/power/hibernate.c 2009-12-08 17:39:44.279661702 -0500
+@@ -48,14 +48,14 @@ enum {
+
+ static int hibernation_mode = HIBERNATION_SHUTDOWN;
+
+-static struct platform_hibernation_ops *hibernation_ops;
++static const struct platform_hibernation_ops *hibernation_ops;
+
+ /**
+ * hibernation_set_ops - set the global hibernate operations
+ * @ops: the hibernation operations to use in subsequent hibernation transitions
+ */
+
+-void hibernation_set_ops(struct platform_hibernation_ops *ops)
++void hibernation_set_ops(const struct platform_hibernation_ops *ops)
+ {
+ if (ops && !(ops->begin && ops->end && ops->pre_snapshot
+ && ops->prepare && ops->finish && ops->enter && ops->pre_restore
+diff -urNp linux-2.6.31.7/kernel/power/poweroff.c linux-2.6.31.7/kernel/power/poweroff.c
+--- linux-2.6.31.7/kernel/power/poweroff.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/power/poweroff.c 2009-12-08 17:39:44.279661702 -0500
+@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
+ .enable_mask = SYSRQ_ENABLE_BOOT,
+ };
+
+-static int pm_sysrq_init(void)
++static int __init pm_sysrq_init(void)
+ {
+ register_sysrq_key('o', &sysrq_poweroff_op);
+ return 0;
+diff -urNp linux-2.6.31.7/kernel/power/process.c linux-2.6.31.7/kernel/power/process.c
+--- linux-2.6.31.7/kernel/power/process.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/power/process.c 2009-12-08 17:39:44.280808671 -0500
+@@ -36,12 +36,15 @@ static int try_to_freeze_tasks(bool sig_
+ struct timeval start, end;
+ u64 elapsed_csecs64;
+ unsigned int elapsed_csecs;
++ bool timedout = false;
+
+ do_gettimeofday(&start);
+
+ end_time = jiffies + TIMEOUT;
+ do {
+ todo = 0;
++ if (time_after(jiffies, end_time))
++ timedout = true;
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ if (frozen(p) || !freezeable(p))
+@@ -56,15 +59,17 @@ static int try_to_freeze_tasks(bool sig_
+ * It is "frozen enough". If the task does wake
+ * up, it will immediately call try_to_freeze.
+ */
+- if (!task_is_stopped_or_traced(p) &&
+- !freezer_should_skip(p))
++ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
+ todo++;
++ if (timedout) {
++ printk(KERN_ERR "Task refusing to freeze:\n");
++ sched_show_task(p);
++ }
++ }
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+ yield(); /* Yield is okay here */
+- if (time_after(jiffies, end_time))
+- break;
+- } while (todo);
++ } while (todo && !timedout);
+
+ do_gettimeofday(&end);
+ elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
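
The try_to_freeze_tasks() rework moves the timeout decision to the top of the loop and records it in a timedout flag, so the final pass can name the tasks that still refuse to freeze instead of giving up silently at the bottom of the loop. A compilable sketch of that loop shape, with fake work items standing in for tasks:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
    int pending[] = { 3, 2, 5 };          /* fake "work left" per task */
    const int ntasks = 3;
    time_t deadline = time(NULL) + 1;      /* 1 second budget */
    bool timedout = false;
    int todo;

    do {
        todo = 0;
        if (time(NULL) > deadline)
            timedout = true;               /* decide up front ... */
        for (int i = 0; i < ntasks; i++) {
            if (pending[i] == 0)
                continue;
            pending[i]--;
            if (pending[i]) {
                todo++;
                if (timedout)              /* ... so the last pass can report */
                    printf("task %d still not done\n", i);
            }
        }
    } while (todo && !timedout);

    printf("%s\n", timedout ? "gave up after timeout" : "all done");
    return 0;
}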
+diff -urNp linux-2.6.31.7/kernel/power/suspend.c linux-2.6.31.7/kernel/power/suspend.c
+--- linux-2.6.31.7/kernel/power/suspend.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/power/suspend.c 2009-12-08 17:39:44.280808671 -0500
+@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
+ [PM_SUSPEND_MEM] = "mem",
+ };
+
+-static struct platform_suspend_ops *suspend_ops;
++static const struct platform_suspend_ops *suspend_ops;
+
+ /**
+ * suspend_set_ops - Set the global suspend method table.
+ * @ops: Pointer to ops structure.
+ */
+-void suspend_set_ops(struct platform_suspend_ops *ops)
++void suspend_set_ops(const struct platform_suspend_ops *ops)
+ {
+ mutex_lock(&pm_mutex);
+ suspend_ops = ops;
+diff -urNp linux-2.6.31.7/kernel/printk.c linux-2.6.31.7/kernel/printk.c
+--- linux-2.6.31.7/kernel/printk.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/printk.c 2009-12-08 17:39:44.281809150 -0500
+@@ -272,6 +272,11 @@ int do_syslog(int type, char __user *buf
+ char c;
+ int error = 0;
+
++#ifdef CONFIG_GRKERNSEC_DMESG
++ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
++ return -EPERM;
++#endif
++
+ error = security_syslog(type);
+ if (error)
+ return error;
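
The do_syslog() hunk makes kernel log access require CAP_SYS_ADMIN when grsec_enable_dmesg is set. From userspace the effect shows up through syslog(2)/klogctl(3): a query that normally returns the log buffer size fails with EPERM on a kernel that restricts it. A small probe, assuming glibc's klogctl; action 10 is SYSLOG_ACTION_SIZE_BUFFER:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/klog.h>

int main(void)
{
    int n = klogctl(10, NULL, 0);

    if (n < 0)
        printf("klogctl: %s (kernel log reading is restricted)\n", strerror(errno));
    else
        printf("kernel log buffer is %d bytes (reading allowed)\n", n);
    return 0;
}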
+diff -urNp linux-2.6.31.7/kernel/ptrace.c linux-2.6.31.7/kernel/ptrace.c
+--- linux-2.6.31.7/kernel/ptrace.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/ptrace.c 2009-12-08 17:39:44.281809150 -0500
+@@ -141,7 +141,7 @@ int __ptrace_may_access(struct task_stru
+ cred->gid != tcred->egid ||
+ cred->gid != tcred->sgid ||
+ cred->gid != tcred->gid) &&
+- !capable(CAP_SYS_PTRACE)) {
++ !capable_nolog(CAP_SYS_PTRACE)) {
+ rcu_read_unlock();
+ return -EPERM;
+ }
+@@ -149,7 +149,7 @@ int __ptrace_may_access(struct task_stru
+ smp_rmb();
+ if (task->mm)
+ dumpable = get_dumpable(task->mm);
+- if (!dumpable && !capable(CAP_SYS_PTRACE))
++ if (!dumpable && !capable_nolog(CAP_SYS_PTRACE))
+ return -EPERM;
+
+ return security_ptrace_may_access(task, mode);
+@@ -199,7 +199,7 @@ int ptrace_attach(struct task_struct *ta
+ goto unlock_tasklist;
+
+ task->ptrace = PT_PTRACED;
+- if (capable(CAP_SYS_PTRACE))
++ if (capable_nolog(CAP_SYS_PTRACE))
+ task->ptrace |= PT_PTRACE_CAP;
+
+ __ptrace_link(task, current);
+@@ -529,18 +529,18 @@ int ptrace_request(struct task_struct *c
+ ret = ptrace_setoptions(child, data);
+ break;
+ case PTRACE_GETEVENTMSG:
+- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
++ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
+ break;
+
+ case PTRACE_GETSIGINFO:
+ ret = ptrace_getsiginfo(child, &siginfo);
+ if (!ret)
+- ret = copy_siginfo_to_user((siginfo_t __user *) data,
++ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
+ &siginfo);
+ break;
+
+ case PTRACE_SETSIGINFO:
+- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
++ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
+ sizeof siginfo))
+ ret = -EFAULT;
+ else
+@@ -618,6 +618,11 @@ SYSCALL_DEFINE4(ptrace, long, request, l
+ goto out;
+ }
+
++ if (gr_handle_ptrace(child, request)) {
++ ret = -EPERM;
++ goto out_put_task_struct;
++ }
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ /*
+@@ -650,7 +655,7 @@ int generic_ptrace_peekdata(struct task_
+ copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+ if (copied != sizeof(tmp))
+ return -EIO;
+- return put_user(tmp, (unsigned long __user *)data);
++ return put_user(tmp, (__force unsigned long __user *)data);
+ }
+
+ int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
+diff -urNp linux-2.6.31.7/kernel/rcupreempt_trace.c linux-2.6.31.7/kernel/rcupreempt_trace.c
+--- linux-2.6.31.7/kernel/rcupreempt_trace.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/rcupreempt_trace.c 2009-12-08 17:39:44.281809150 -0500
+@@ -261,17 +261,17 @@ static ssize_t rcuctrs_read(struct file
+ return bcount;
+ }
+
+-static struct file_operations rcustats_fops = {
++static const struct file_operations rcustats_fops = {
+ .owner = THIS_MODULE,
+ .read = rcustats_read,
+ };
+
+-static struct file_operations rcugp_fops = {
++static const struct file_operations rcugp_fops = {
+ .owner = THIS_MODULE,
+ .read = rcugp_read,
+ };
+
+-static struct file_operations rcuctrs_fops = {
++static const struct file_operations rcuctrs_fops = {
+ .owner = THIS_MODULE,
+ .read = rcuctrs_read,
+ };
+diff -urNp linux-2.6.31.7/kernel/rcutree_trace.c linux-2.6.31.7/kernel/rcutree_trace.c
+--- linux-2.6.31.7/kernel/rcutree_trace.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/rcutree_trace.c 2009-12-08 17:39:44.282813273 -0500
+@@ -88,7 +88,7 @@ static int rcudata_open(struct inode *in
+ return single_open(file, show_rcudata, NULL);
+ }
+
+-static struct file_operations rcudata_fops = {
++static const struct file_operations rcudata_fops = {
+ .owner = THIS_MODULE,
+ .open = rcudata_open,
+ .read = seq_read,
+@@ -136,7 +136,7 @@ static int rcudata_csv_open(struct inode
+ return single_open(file, show_rcudata_csv, NULL);
+ }
+
+-static struct file_operations rcudata_csv_fops = {
++static const struct file_operations rcudata_csv_fops = {
+ .owner = THIS_MODULE,
+ .open = rcudata_csv_open,
+ .read = seq_read,
+@@ -183,7 +183,7 @@ static int rcuhier_open(struct inode *in
+ return single_open(file, show_rcuhier, NULL);
+ }
+
+-static struct file_operations rcuhier_fops = {
++static const struct file_operations rcuhier_fops = {
+ .owner = THIS_MODULE,
+ .open = rcuhier_open,
+ .read = seq_read,
+@@ -205,7 +205,7 @@ static int rcugp_open(struct inode *inod
+ return single_open(file, show_rcugp, NULL);
+ }
+
+-static struct file_operations rcugp_fops = {
++static const struct file_operations rcugp_fops = {
+ .owner = THIS_MODULE,
+ .open = rcugp_open,
+ .read = seq_read,
+@@ -255,7 +255,7 @@ static int rcu_pending_open(struct inode
+ return single_open(file, show_rcu_pending, NULL);
+ }
+
+-static struct file_operations rcu_pending_fops = {
++static const struct file_operations rcu_pending_fops = {
+ .owner = THIS_MODULE,
+ .open = rcu_pending_open,
+ .read = seq_read,
+diff -urNp linux-2.6.31.7/kernel/relay.c linux-2.6.31.7/kernel/relay.c
+--- linux-2.6.31.7/kernel/relay.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/relay.c 2009-12-08 17:39:44.282813273 -0500
+@@ -60,7 +60,7 @@ static int relay_buf_fault(struct vm_are
+ /*
+ * vm_ops for relay file mappings.
+ */
+-static struct vm_operations_struct relay_file_mmap_ops = {
++static const struct vm_operations_struct relay_file_mmap_ops = {
+ .fault = relay_buf_fault,
+ .close = relay_file_mmap_close,
+ };
+@@ -1292,7 +1292,7 @@ static int subbuf_splice_actor(struct fi
+ return 0;
+
+ ret = *nonpad_ret = splice_to_pipe(pipe, &spd);
+- if (ret < 0 || ret < total_len)
++ if ((int)ret < 0 || ret < total_len)
+ return ret;
+
+ if (read_start + ret == nonpad_end)
+diff -urNp linux-2.6.31.7/kernel/resource.c linux-2.6.31.7/kernel/resource.c
+--- linux-2.6.31.7/kernel/resource.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/resource.c 2009-12-08 17:39:44.282813273 -0500
+@@ -132,8 +132,18 @@ static const struct file_operations proc
+
+ static int __init ioresources_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
++ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
++ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
++#endif
++#else
+ proc_create("ioports", 0, NULL, &proc_ioports_operations);
+ proc_create("iomem", 0, NULL, &proc_iomem_operations);
++#endif
+ return 0;
+ }
+ __initcall(ioresources_init);
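
Both this ioresources_init() hunk and the earlier proc_modules_init() one pick the /proc entry mode at build time: 0 falls back to the proc default (effectively world-readable), S_IRUSR restricts the entry to root, and S_IRUSR | S_IRGRP adds a single group. For reference, the octal values those symbols expand to:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    printf("S_IRUSR           = %04o\n", (unsigned int)S_IRUSR);            /* 0400 */
    printf("S_IRUSR | S_IRGRP = %04o\n", (unsigned int)(S_IRUSR | S_IRGRP)); /* 0440 */
    return 0;
}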
+diff -urNp linux-2.6.31.7/kernel/sched.c linux-2.6.31.7/kernel/sched.c
+--- linux-2.6.31.7/kernel/sched.c 2009-12-08 17:29:51.639742169 -0500
++++ linux-2.6.31.7/kernel/sched.c 2009-12-08 17:39:44.284659148 -0500
+@@ -820,7 +820,7 @@ static int sched_feat_open(struct inode
+ return single_open(filp, sched_feat_show, NULL);
+ }
+
+-static struct file_operations sched_feat_fops = {
++static const struct file_operations sched_feat_fops = {
+ .open = sched_feat_open,
+ .write = sched_feat_write,
+ .read = seq_read,
+@@ -5978,6 +5978,8 @@ int can_nice(const struct task_struct *p
+ /* convert nice value [19,-20] to rlimit style value [1,40] */
+ int nice_rlim = 20 - nice;
+
++ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
++
+ return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
+ capable(CAP_SYS_NICE));
+ }
+@@ -6011,7 +6013,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+ if (nice > 19)
+ nice = 19;
+
+- if (increment < 0 && !can_nice(current, nice))
++ if (increment < 0 && (!can_nice(current, nice) ||
++ gr_handle_chroot_nice()))
+ return -EPERM;
+
+ retval = security_task_setnice(current, nice);
+@@ -6153,6 +6156,8 @@ recheck:
+ if (rt_policy(policy)) {
+ unsigned long rlim_rtprio;
+
++ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
++
+ if (!lock_task_sighand(p, &flags))
+ return -ESRCH;
+ rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
+@@ -7300,7 +7305,7 @@ static struct ctl_table sd_ctl_dir[] = {
+ .procname = "sched_domain",
+ .mode = 0555,
+ },
+- {0, },
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+
+ static struct ctl_table sd_ctl_root[] = {
+@@ -7310,7 +7315,7 @@ static struct ctl_table sd_ctl_root[] =
+ .mode = 0555,
+ .child = sd_ctl_dir,
+ },
+- {0, },
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+
+ static struct ctl_table *sd_alloc_ctl_entry(int n)
+diff -urNp linux-2.6.31.7/kernel/signal.c linux-2.6.31.7/kernel/signal.c
+--- linux-2.6.31.7/kernel/signal.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/signal.c 2009-12-08 17:39:44.285810059 -0500
+@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
+ */
+ user = get_uid(__task_cred(t)->user);
+ atomic_inc(&user->sigpending);
++
++ if (!override_rlimit)
++ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
+ if (override_rlimit ||
+ atomic_read(&user->sigpending) <=
+ t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
+@@ -625,6 +628,9 @@ static int check_kill_permission(int sig
+ }
+ }
+
++ if (gr_handle_signal(t, sig))
++ return -EPERM;
++
+ return security_task_kill(t, info, sig, 0);
+ }
+
+@@ -939,8 +945,8 @@ static void print_fatal_signal(struct pt
+ for (i = 0; i < 16; i++) {
+ unsigned char insn;
+
+- __get_user(insn, (unsigned char *)(regs->ip + i));
+- printk("%02x ", insn);
++ if (!get_user(insn, (unsigned char __user *)(regs->ip + i)))
++ printk("%02x ", insn);
+ }
+ }
+ #endif
+@@ -965,7 +971,7 @@ __group_send_sig_info(int sig, struct si
+ return send_signal(sig, info, p, 1);
+ }
+
+-static int
++int
+ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ {
+ return send_signal(sig, info, t, 0);
+@@ -1005,6 +1011,9 @@ force_sig_info(int sig, struct siginfo *
+ ret = specific_send_sig_info(sig, info, t);
+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
+
++ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
++ gr_handle_crash(t, sig);
++
+ return ret;
+ }
+
+@@ -1079,6 +1088,8 @@ int group_send_sig_info(int sig, struct
+ ret = __group_send_sig_info(sig, info, p);
+ unlock_task_sighand(p, &flags);
+ }
++ if (!ret)
++ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
+ }
+
+ return ret;
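
In print_fatal_signal() the unchecked __get_user() becomes get_user() with its return value tested, so an instruction byte is only printed when the copy from the (possibly faulting) user address actually succeeded. A simplified userspace model of the same check-before-print discipline; fetch_byte() is an invented stand-in, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

static bool fetch_byte(const unsigned char *src, size_t len, size_t i,
                       unsigned char *out)
{
    if (i >= len)
        return false;   /* models get_user() failing on a bad address */
    *out = src[i];
    return true;
}

int main(void)
{
    const unsigned char code[] = { 0x55, 0x89, 0xe5 };

    for (size_t i = 0; i < 16; i++) {
        unsigned char insn;

        if (fetch_byte(code, sizeof(code), i, &insn))
            printf("%02x ", insn);   /* only print bytes we really read */
    }
    printf("\n");
    return 0;
}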
+diff -urNp linux-2.6.31.7/kernel/smp.c linux-2.6.31.7/kernel/smp.c
+--- linux-2.6.31.7/kernel/smp.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/smp.c 2009-12-08 17:39:44.285810059 -0500
+@@ -451,22 +451,22 @@ int smp_call_function(void (*func)(void
+ }
+ EXPORT_SYMBOL(smp_call_function);
+
+-void ipi_call_lock(void)
++void ipi_call_lock(void) __acquires(call_function.lock)
+ {
+ spin_lock(&call_function.lock);
+ }
+
+-void ipi_call_unlock(void)
++void ipi_call_unlock(void) __releases(call_function.lock)
+ {
+ spin_unlock(&call_function.lock);
+ }
+
+-void ipi_call_lock_irq(void)
++void ipi_call_lock_irq(void) __acquires(call_function.lock)
+ {
+ spin_lock_irq(&call_function.lock);
+ }
+
+-void ipi_call_unlock_irq(void)
++void ipi_call_unlock_irq(void) __releases(call_function.lock)
+ {
+ spin_unlock_irq(&call_function.lock);
+ }
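
The ipi_call_lock()/unlock() changes only add __acquires()/__releases() annotations (and the earlier ptrace.c hunks add __force casts); these are markers for the sparse static checker and compile away under gcc. A sketch of how such annotations are typically defined and used, mirroring the kernel's compiler annotations in spirit rather than copying them exactly; build normally to run, or run sparse over the file to check lock balance:

#ifdef __CHECKER__
# define __acquires(x)  __attribute__((context(x, 0, 1)))
# define __releases(x)  __attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

#include <pthread.h>

static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;

static void ipi_call_lock(void) __acquires(call_lock);
static void ipi_call_unlock(void) __releases(call_lock);

static void ipi_call_lock(void)   { pthread_mutex_lock(&call_lock); }
static void ipi_call_unlock(void) { pthread_mutex_unlock(&call_lock); }

int main(void)
{
    ipi_call_lock();
    /* critical section */
    ipi_call_unlock();
    return 0;
}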
+diff -urNp linux-2.6.31.7/kernel/softirq.c linux-2.6.31.7/kernel/softirq.c
+--- linux-2.6.31.7/kernel/softirq.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/softirq.c 2009-12-08 17:39:44.286709061 -0500
+@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
+
+ static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+
+-char *softirq_to_name[NR_SOFTIRQS] = {
++const char * const softirq_to_name[NR_SOFTIRQS] = {
+ "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+diff -urNp linux-2.6.31.7/kernel/sys.c linux-2.6.31.7/kernel/sys.c
+--- linux-2.6.31.7/kernel/sys.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/sys.c 2009-12-08 17:39:44.295680272 -0500
+@@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
+ error = -EACCES;
+ goto out;
+ }
++
++ if (gr_handle_chroot_setpriority(p, niceval)) {
++ error = -EACCES;
++ goto out;
++ }
++
+ no_nice = security_task_setnice(p, niceval);
+ if (no_nice) {
+ error = no_nice;
+@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
+ !(user = find_user(who)))
+ goto out_unlock; /* No processes for this user */
+
+- do_each_thread(g, p)
++ do_each_thread(g, p) {
+ if (__task_cred(p)->uid == who)
+ error = set_one_prio(p, niceval, error);
+- while_each_thread(g, p);
++ } while_each_thread(g, p);
+ if (who != cred->uid)
+ free_uid(user); /* For find_user() */
+ break;
+@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
+ !(user = find_user(who)))
+ goto out_unlock; /* No processes for this user */
+
+- do_each_thread(g, p)
++ do_each_thread(g, p) {
+ if (__task_cred(p)->uid == who) {
+ niceval = 20 - task_nice(p);
+ if (niceval > retval)
+ retval = niceval;
+ }
+- while_each_thread(g, p);
++ } while_each_thread(g, p);
+ if (who != cred->uid)
+ free_uid(user); /* for find_user() */
+ break;
+@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
+ goto error;
+ }
+
++ if (gr_check_group_change(new->gid, new->egid, -1))
++ goto error;
++
+ if (rgid != (gid_t) -1 ||
+ (egid != (gid_t) -1 && egid != old->gid))
+ new->sgid = new->egid;
+@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
+ goto error;
+
+ retval = -EPERM;
++
++ if (gr_check_group_change(gid, gid, gid))
++ goto error;
++
+ if (capable(CAP_SETGID))
+ new->gid = new->egid = new->sgid = new->fsgid = gid;
+ else if (gid == old->gid || gid == old->sgid)
+@@ -632,6 +645,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
+ goto error;
+ }
+
++ if (gr_check_user_change(new->uid, new->euid, -1))
++ goto error;
++
+ if (new->uid != old->uid) {
+ retval = set_user(new);
+ if (retval < 0)
+@@ -680,6 +696,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
+ goto error;
+
+ retval = -EPERM;
++
++ if (gr_check_crash_uid(uid))
++ goto error;
++ if (gr_check_user_change(uid, uid, uid))
++ goto error;
++
+ if (capable(CAP_SETUID)) {
+ new->suid = new->uid = uid;
+ if (uid != old->uid) {
+@@ -737,6 +759,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
+ goto error;
+ }
+
++ if (gr_check_user_change(ruid, euid, -1))
++ goto error;
++
+ if (ruid != (uid_t) -1) {
+ new->uid = ruid;
+ if (ruid != old->uid) {
+@@ -805,6 +830,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
+ goto error;
+ }
+
++ if (gr_check_group_change(rgid, egid, -1))
++ goto error;
++
+ if (rgid != (gid_t) -1)
+ new->gid = rgid;
+ if (egid != (gid_t) -1)
+@@ -854,6 +882,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+ if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
+ goto error;
+
++ if (gr_check_user_change(-1, -1, uid))
++ goto error;
++
+ if (uid == old->uid || uid == old->euid ||
+ uid == old->suid || uid == old->fsuid ||
+ capable(CAP_SETUID)) {
+@@ -894,6 +925,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
+ if (gid == old->gid || gid == old->egid ||
+ gid == old->sgid || gid == old->fsgid ||
+ capable(CAP_SETGID)) {
++ if (gr_check_group_change(-1, -1, gid))
++ goto error;
++
+ if (gid != old_fsgid) {
+ new->fsgid = gid;
+ goto change_okay;
+@@ -1443,7 +1477,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
+ error = get_dumpable(me->mm);
+ break;
+ case PR_SET_DUMPABLE:
+- if (arg2 < 0 || arg2 > 1) {
++ if (arg2 > 1) {
+ error = -EINVAL;
+ break;
+ }
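
In the PR_SET_DUMPABLE case, arg2 is an unsigned long, so the old "arg2 < 0" test could never be true and "arg2 > 1" alone already rejects every invalid value; the hunk simply drops the dead comparison. A compilable illustration (build with -Wall -Wextra to see the tautological-comparison warning):

#include <stdio.h>

static int check_old(unsigned long arg2)
{
    return (arg2 < 0 || arg2 > 1) ? -1 : 0;   /* "arg2 < 0" is dead code */
}

static int check_new(unsigned long arg2)
{
    return (arg2 > 1) ? -1 : 0;
}

int main(void)
{
    unsigned long probe = (unsigned long)-1;   /* what a "negative" arg becomes */

    printf("old: %d  new: %d\n", check_old(probe), check_new(probe));
    return 0;
}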
+diff -urNp linux-2.6.31.7/kernel/sysctl.c linux-2.6.31.7/kernel/sysctl.c
+--- linux-2.6.31.7/kernel/sysctl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/sysctl.c 2009-12-08 17:39:44.296810422 -0500
+@@ -65,6 +65,13 @@
+ static int deprecated_sysctl_warning(struct __sysctl_args *args);
+
+ #if defined(CONFIG_SYSCTL)
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
++extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
++ const int op);
++extern int gr_handle_chroot_sysctl(const int op);
+
+ /* External variables not in a header file. */
+ extern int C_A_D;
+@@ -163,6 +170,7 @@ static int proc_do_cad_pid(struct ctl_ta
+ static int proc_taint(struct ctl_table *table, int write, struct file *filp,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+ #endif
++extern ctl_table grsecurity_table[];
+
+ static struct ctl_table root_table[];
+ static struct ctl_table_root sysctl_table_root;
+@@ -195,6 +203,21 @@ extern struct ctl_table epoll_table[];
+ int sysctl_legacy_va_layout;
+ #endif
+
++#ifdef CONFIG_PAX_SOFTMODE
++static ctl_table pax_table[] = {
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "softmode",
++ .data = &pax_softmode,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++
++ { .ctl_name = 0 }
++};
++#endif
++
+ extern int prove_locking;
+ extern int lock_stat;
+
+@@ -246,6 +269,24 @@ static int max_wakeup_granularity_ns = N
+ #endif
+
+ static struct ctl_table kern_table[] = {
++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "grsecurity",
++ .mode = 0500,
++ .child = grsecurity_table,
++ },
++#endif
++
++#ifdef CONFIG_PAX_SOFTMODE
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "pax",
++ .mode = 0500,
++ .child = pax_table,
++ },
++#endif
++
+ #ifdef CONFIG_SCHED_DEBUG
+ {
+ .ctl_name = CTL_UNNUMBERED,
+@@ -1734,6 +1775,8 @@ static int do_sysctl_strategy(struct ctl
+ return 0;
+ }
+
++static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
++
+ static int parse_table(int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen,
+@@ -1752,7 +1795,7 @@ repeat:
+ if (n == table->ctl_name) {
+ int error;
+ if (table->child) {
+- if (sysctl_perm(root, table, MAY_EXEC))
++ if (sysctl_perm_nochk(root, table, MAY_EXEC))
+ return -EPERM;
+ name++;
+ nlen--;
+@@ -1837,6 +1880,33 @@ int sysctl_perm(struct ctl_table_root *r
+ int error;
+ int mode;
+
++ if (table->parent != NULL && table->parent->procname != NULL &&
++ table->procname != NULL &&
++ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
++ return -EACCES;
++ if (gr_handle_chroot_sysctl(op))
++ return -EACCES;
++ error = gr_handle_sysctl(table, op);
++ if (error)
++ return error;
++
++ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
++ if (error)
++ return error;
++
++ if (root->permissions)
++ mode = root->permissions(root, current->nsproxy, table);
++ else
++ mode = table->mode;
++
++ return test_perm(mode, op);
++}
++
++int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
++{
++ int error;
++ int mode;
++
+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
+ if (error)
+ return error;
+diff -urNp linux-2.6.31.7/kernel/taskstats.c linux-2.6.31.7/kernel/taskstats.c
+--- linux-2.6.31.7/kernel/taskstats.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/taskstats.c 2009-12-08 17:39:44.296810422 -0500
+@@ -26,9 +26,12 @@
+ #include <linux/cgroup.h>
+ #include <linux/fs.h>
+ #include <linux/file.h>
++#include <linux/grsecurity.h>
+ #include <net/genetlink.h>
+ #include <asm/atomic.h>
+
++extern int gr_is_taskstats_denied(int pid);
++
+ /*
+ * Maximum length of a cpumask that can be specified in
+ * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
+@@ -433,6 +436,9 @@ static int taskstats_user_cmd(struct sk_
+ size_t size;
+ cpumask_var_t mask;
+
++ if (gr_is_taskstats_denied(current->pid))
++ return -EACCES;
++
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+diff -urNp linux-2.6.31.7/kernel/time/tick-broadcast.c linux-2.6.31.7/kernel/time/tick-broadcast.c
+--- linux-2.6.31.7/kernel/time/tick-broadcast.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/time/tick-broadcast.c 2009-12-08 17:39:44.296810422 -0500
+@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
+ * then clear the broadcast bit.
+ */
+ if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
+- int cpu = smp_processor_id();
++ cpu = smp_processor_id();
+
+ cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
+ tick_broadcast_clear_oneshot(cpu);
+diff -urNp linux-2.6.31.7/kernel/time/timer_list.c linux-2.6.31.7/kernel/time/timer_list.c
+--- linux-2.6.31.7/kernel/time/timer_list.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/time/timer_list.c 2009-12-08 17:39:44.296810422 -0500
+@@ -275,7 +275,7 @@ static int timer_list_open(struct inode
+ return single_open(filp, timer_list_show, NULL);
+ }
+
+-static struct file_operations timer_list_fops = {
++static const struct file_operations timer_list_fops = {
+ .open = timer_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.31.7/kernel/time/timer_stats.c linux-2.6.31.7/kernel/time/timer_stats.c
+--- linux-2.6.31.7/kernel/time/timer_stats.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/time/timer_stats.c 2009-12-08 17:39:44.297815263 -0500
+@@ -395,7 +395,7 @@ static int tstats_open(struct inode *ino
+ return single_open(filp, tstats_show, NULL);
+ }
+
+-static struct file_operations tstats_fops = {
++static const struct file_operations tstats_fops = {
+ .open = tstats_open,
+ .read = seq_read,
+ .write = tstats_write,
+diff -urNp linux-2.6.31.7/kernel/time.c linux-2.6.31.7/kernel/time.c
+--- linux-2.6.31.7/kernel/time.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/time.c 2009-12-08 17:39:44.297815263 -0500
+@@ -94,6 +94,9 @@ SYSCALL_DEFINE1(stime, time_t __user *,
+ return err;
+
+ do_settimeofday(&tv);
++
++ gr_log_timechange();
++
+ return 0;
+ }
+
+@@ -202,6 +205,8 @@ SYSCALL_DEFINE2(settimeofday, struct tim
+ return -EFAULT;
+ }
+
++ gr_log_timechange();
++
+ return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
+ }
+
+@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
+ * Avoid unnecessary multiplications/divisions in the
+ * two most common HZ cases:
+ */
+-unsigned int inline jiffies_to_msecs(const unsigned long j)
++inline unsigned int jiffies_to_msecs(const unsigned long j)
+ {
+ #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (MSEC_PER_SEC / HZ) * j;
+@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
+ }
+ EXPORT_SYMBOL(jiffies_to_msecs);
+
+-unsigned int inline jiffies_to_usecs(const unsigned long j)
++inline unsigned int jiffies_to_usecs(const unsigned long j)
+ {
+ #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+ return (USEC_PER_SEC / HZ) * j;
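
The jiffies_to_msecs()/jiffies_to_usecs() changes only reorder declaration specifiers: "unsigned int inline" is legal C, but putting inline after the type triggers gcc's -Wold-style-declaration warning (part of -Wextra), so the conventional order is used. A trivial example of both spellings:

#include <stdio.h>

static unsigned int inline old_style_double(unsigned int x) { return 2 * x; }
static inline unsigned int new_style_double(unsigned int x) { return 2 * x; }

int main(void)
{
    printf("%u %u\n", old_style_double(21), new_style_double(21));
    return 0;
}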
+diff -urNp linux-2.6.31.7/kernel/trace/ftrace.c linux-2.6.31.7/kernel/trace/ftrace.c
+--- linux-2.6.31.7/kernel/trace/ftrace.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/trace/ftrace.c 2009-12-08 17:39:44.298811459 -0500
+@@ -1562,7 +1562,7 @@ static int t_show(struct seq_file *m, vo
+ return 0;
+ }
+
+-static struct seq_operations show_ftrace_seq_ops = {
++static const struct seq_operations show_ftrace_seq_ops = {
+ .start = t_start,
+ .next = t_next,
+ .stop = t_stop,
+@@ -2560,7 +2560,7 @@ static int g_show(struct seq_file *m, vo
+ return 0;
+ }
+
+-static struct seq_operations ftrace_graph_seq_ops = {
++static const struct seq_operations ftrace_graph_seq_ops = {
+ .start = g_start,
+ .next = g_next,
+ .stop = g_stop,
+diff -urNp linux-2.6.31.7/kernel/trace/Kconfig linux-2.6.31.7/kernel/trace/Kconfig
+--- linux-2.6.31.7/kernel/trace/Kconfig 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/trace/Kconfig 2009-12-08 17:39:44.297815263 -0500
+@@ -111,6 +111,7 @@ if FTRACE
+ config FUNCTION_TRACER
+ bool "Kernel Function Tracer"
+ depends on HAVE_FUNCTION_TRACER
++ depends on !PAX_KERNEXEC
+ select FRAME_POINTER
+ select KALLSYMS
+ select GENERIC_TRACER
+@@ -326,6 +327,7 @@ config POWER_TRACER
+ config STACK_TRACER
+ bool "Trace max stack"
+ depends on HAVE_FUNCTION_TRACER
++ depends on !PAX_KERNEXEC
+ select FUNCTION_TRACER
+ select STACKTRACE
+ select KALLSYMS
+diff -urNp linux-2.6.31.7/kernel/trace/trace.c linux-2.6.31.7/kernel/trace/trace.c
+--- linux-2.6.31.7/kernel/trace/trace.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/trace/trace.c 2009-12-08 17:39:44.299810674 -0500
+@@ -1885,7 +1885,7 @@ static int s_show(struct seq_file *m, vo
+ return 0;
+ }
+
+-static struct seq_operations tracer_seq_ops = {
++static const struct seq_operations tracer_seq_ops = {
+ .start = s_start,
+ .next = s_next,
+ .stop = s_stop,
+@@ -2097,7 +2097,7 @@ static int t_show(struct seq_file *m, vo
+ return 0;
+ }
+
+-static struct seq_operations show_traces_seq_ops = {
++static const struct seq_operations show_traces_seq_ops = {
+ .start = t_start,
+ .next = t_next,
+ .stop = t_stop,
+@@ -2292,23 +2292,23 @@ tracing_trace_options_read(struct file *
+ /* Try to assign a tracer specific option */
+ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+ {
+- struct tracer_flags *trace_flags = trace->flags;
++ struct tracer_flags *tracer_flags = trace->flags;
+ struct tracer_opt *opts = NULL;
+ int ret = 0, i = 0;
+ int len;
+
+- for (i = 0; trace_flags->opts[i].name; i++) {
+- opts = &trace_flags->opts[i];
++ for (i = 0; tracer_flags->opts[i].name; i++) {
++ opts = &tracer_flags->opts[i];
+ len = strlen(opts->name);
+
+ if (strncmp(cmp, opts->name, len) == 0) {
+- ret = trace->set_flag(trace_flags->val,
++ ret = trace->set_flag(tracer_flags->val,
+ opts->bit, !neg);
+ break;
+ }
+ }
+ /* Not found */
+- if (!trace_flags->opts[i].name)
++ if (!tracer_flags->opts[i].name)
+ return -EINVAL;
+
+ /* Refused to handle */
+@@ -2316,9 +2316,9 @@ static int set_tracer_option(struct trac
+ return ret;
+
+ if (neg)
+- trace_flags->val &= ~opts->bit;
++ tracer_flags->val &= ~opts->bit;
+ else
+- trace_flags->val |= opts->bit;
++ tracer_flags->val |= opts->bit;
+
+ return 0;
+ }
+@@ -3685,10 +3685,9 @@ static const struct file_operations trac
+ };
+ #endif
+
+-static struct dentry *d_tracer;
+-
+ struct dentry *tracing_init_dentry(void)
+ {
++ static struct dentry *d_tracer;
+ static int once;
+
+ if (d_tracer)
+@@ -3708,10 +3707,9 @@ struct dentry *tracing_init_dentry(void)
+ return d_tracer;
+ }
+
+-static struct dentry *d_percpu;
+-
+ struct dentry *tracing_dentry_percpu(void)
+ {
++ static struct dentry *d_percpu;
+ static int once;
+ struct dentry *d_tracer;
+
+diff -urNp linux-2.6.31.7/kernel/trace/trace_output.c linux-2.6.31.7/kernel/trace/trace_output.c
+--- linux-2.6.31.7/kernel/trace/trace_output.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/trace/trace_output.c 2009-12-08 17:39:44.299810674 -0500
+@@ -234,7 +234,7 @@ int trace_seq_path(struct trace_seq *s,
+ return 0;
+ p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+ if (!IS_ERR(p)) {
+- p = mangle_path(s->buffer + s->len, p, "\n");
++ p = mangle_path(s->buffer + s->len, p, "\n\\");
+ if (p) {
+ s->len = p - s->buffer;
+ return 1;
+diff -urNp linux-2.6.31.7/kernel/utsname_sysctl.c linux-2.6.31.7/kernel/utsname_sysctl.c
+--- linux-2.6.31.7/kernel/utsname_sysctl.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/kernel/utsname_sysctl.c 2009-12-08 17:39:44.300811069 -0500
+@@ -123,7 +123,7 @@ static struct ctl_table uts_kern_table[]
+ .proc_handler = proc_do_uts_string,
+ .strategy = sysctl_uts_string,
+ },
+- {}
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+
+ static struct ctl_table uts_root_table[] = {
+@@ -133,7 +133,7 @@ static struct ctl_table uts_root_table[]
+ .mode = 0555,
+ .child = uts_kern_table,
+ },
+- {}
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+
+ static int __init utsname_sysctl_init(void)
+diff -urNp linux-2.6.31.7/lib/inflate.c linux-2.6.31.7/lib/inflate.c
+--- linux-2.6.31.7/lib/inflate.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/lib/inflate.c 2009-12-08 17:39:44.301812583 -0500
+@@ -266,7 +266,7 @@ static void free(void *where)
+ malloc_ptr = free_mem_ptr;
+ }
+ #else
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+ #endif
+
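
The inflate.c hunk parenthesizes the macro argument in the malloc() wrapper. In that particular expansion the argument is only passed straight through to kmalloc(), so the change is defensive hygiene, but the hazard it guards against is real: an unparenthesized argument containing a lower-precedence operator changes meaning inside the expansion. A toy allocator pair showing the difference; my_alloc() is invented for the example:

#include <stdio.h>
#include <stdlib.h>

static size_t last_size;
static void *my_alloc(size_t n) { last_size = n; return malloc(n); }

#define ALLOC_BAD(a)  my_alloc(a * 2)     /* unparenthesized argument */
#define ALLOC_GOOD(a) my_alloc((a) * 2)

int main(void)
{
    size_t n = 3;

    free(ALLOC_BAD(n + 1));    /* expands to my_alloc(n + 1 * 2) -> 5 bytes */
    printf("bad:  %zu bytes\n", last_size);

    free(ALLOC_GOOD(n + 1));   /* expands to my_alloc((n + 1) * 2) -> 8 bytes */
    printf("good: %zu bytes\n", last_size);
    return 0;
}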
+diff -urNp linux-2.6.31.7/lib/Kconfig.debug linux-2.6.31.7/lib/Kconfig.debug
+--- linux-2.6.31.7/lib/Kconfig.debug 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/lib/Kconfig.debug 2009-12-08 17:39:44.300811069 -0500
+@@ -866,7 +866,7 @@ config LATENCYTOP
+ select STACKTRACE
+ select SCHEDSTATS
+ select SCHED_DEBUG
+- depends on HAVE_LATENCYTOP_SUPPORT
++ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
+ help
+ Enable this option if you want to use the LatencyTOP tool
+ to find out which userspace is blocking on what kernel operations.
+diff -urNp linux-2.6.31.7/lib/kobject.c linux-2.6.31.7/lib/kobject.c
+--- linux-2.6.31.7/lib/kobject.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/lib/kobject.c 2009-12-08 17:39:44.301812583 -0500
+@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
+ return ret;
+ }
+
+-struct sysfs_ops kobj_sysfs_ops = {
++const struct sysfs_ops kobj_sysfs_ops = {
+ .show = kobj_attr_show,
+ .store = kobj_attr_store,
+ };
+@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
+ * If the kset was not able to be created, NULL will be returned.
+ */
+ static struct kset *kset_create(const char *name,
+- struct kset_uevent_ops *uevent_ops,
++ const struct kset_uevent_ops *uevent_ops,
+ struct kobject *parent_kobj)
+ {
+ struct kset *kset;
+@@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
+ * If the kset was not able to be created, NULL will be returned.
+ */
+ struct kset *kset_create_and_add(const char *name,
+- struct kset_uevent_ops *uevent_ops,
++ const struct kset_uevent_ops *uevent_ops,
+ struct kobject *parent_kobj)
+ {
+ struct kset *kset;
+diff -urNp linux-2.6.31.7/lib/kobject_uevent.c linux-2.6.31.7/lib/kobject_uevent.c
+--- linux-2.6.31.7/lib/kobject_uevent.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/lib/kobject_uevent.c 2009-12-08 17:39:44.301812583 -0500
+@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
+ const char *subsystem;
+ struct kobject *top_kobj;
+ struct kset *kset;
+- struct kset_uevent_ops *uevent_ops;
++ const struct kset_uevent_ops *uevent_ops;
+ u64 seq;
+ int i = 0;
+ int retval = 0;
+diff -urNp linux-2.6.31.7/lib/parser.c linux-2.6.31.7/lib/parser.c
+--- linux-2.6.31.7/lib/parser.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/lib/parser.c 2009-12-08 17:39:44.301812583 -0500
+@@ -126,7 +126,7 @@ static int match_number(substring_t *s,
+ char *buf;
+ int ret;
+
+- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
++ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ memcpy(buf, s->from, s->to - s->from);
+diff -urNp linux-2.6.31.7/lib/radix-tree.c linux-2.6.31.7/lib/radix-tree.c
+--- linux-2.6.31.7/lib/radix-tree.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/lib/radix-tree.c 2009-12-08 17:39:44.302815507 -0500
+@@ -81,7 +81,7 @@ struct radix_tree_preload {
+ int nr;
+ struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
+ };
+-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
++static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
+
+ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
+ {
+diff -urNp linux-2.6.31.7/lib/random32.c linux-2.6.31.7/lib/random32.c
+--- linux-2.6.31.7/lib/random32.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/lib/random32.c 2009-12-08 17:39:44.302815507 -0500
+@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
+ */
+ static inline u32 __seed(u32 x, u32 m)
+ {
+- return (x < m) ? x + m : x;
++ return (x <= m) ? x + m + 1 : x;
+ }
+
+ /**
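
The random32.c fix tightens seed conditioning: the Tausworthe generator needs each state word to be strictly greater than the per-word threshold passed as m, and the old expression could return exactly m (for example when x is 0), leaving a too-small state word. The new form guarantees at least m + 1. A small comparison of both versions:

#include <stdio.h>

static unsigned int seed_old(unsigned int x, unsigned int m)
{
    return (x < m) ? x + m : x;
}

static unsigned int seed_new(unsigned int x, unsigned int m)
{
    return (x <= m) ? x + m + 1 : x;
}

int main(void)
{
    unsigned int m = 7;  /* the generator needs this word to be > 7 */

    for (unsigned int x = 0; x <= 8; x++)
        printf("x=%u  old=%u%s  new=%u\n", x,
               seed_old(x, m), seed_old(x, m) <= m ? " (too small!)" : "",
               seed_new(x, m));
    return 0;
}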
+diff -urNp linux-2.6.31.7/localversion-grsec linux-2.6.31.7/localversion-grsec
+--- linux-2.6.31.7/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.31.7/localversion-grsec 2009-12-08 17:39:44.302815507 -0500
+@@ -0,0 +1 @@
++-grsec
+diff -urNp linux-2.6.31.7/Makefile linux-2.6.31.7/Makefile
+--- linux-2.6.31.7/Makefile 2009-12-08 17:29:51.568671976 -0500
++++ linux-2.6.31.7/Makefile 2009-12-08 17:39:42.712640039 -0500
+@@ -221,8 +221,8 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
+
+ HOSTCC = gcc
+ HOSTCXX = g++
+-HOSTCFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
+-HOSTCXXFLAGS = -O2
++HOSTCFLAGS = -Wall -W -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
++HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
+
+ # Decide whether to build built-in, modular, or both.
+ # Normally, just do built-in.
+@@ -639,7 +639,7 @@ export mod_strip_cmd
+
+
+ ifeq ($(KBUILD_EXTMOD),)
+-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+
+ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
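
The Makefile hunk adds -W (extra warnings) and -fno-delete-null-pointer-checks to the host compiler flags and links the new grsecurity/ directory into the core build. The flag matters because, once a pointer has been dereferenced, an optimizing compiler may assume it is non-NULL and delete a later NULL test, which has turned kernel NULL-dereference bugs into exploitable paths. A sketch of the code shape involved; whether the check is actually removed depends on the compiler version, so compare gcc -O2 -S output with and without the flag:

#include <stdio.h>

struct conf { int enabled; };

int is_enabled(struct conf *p)
{
    int v = p->enabled;    /* dereference: compiler may infer p != NULL */

    if (p == NULL)         /* candidate for deletion without the flag */
        return 0;
    return v;
}

int main(void)
{
    struct conf c = { .enabled = 1 };

    printf("enabled: %d\n", is_enabled(&c));
    return 0;
}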
+diff -urNp linux-2.6.31.7/mm/filemap.c linux-2.6.31.7/mm/filemap.c
+--- linux-2.6.31.7/mm/filemap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/filemap.c 2009-12-08 17:39:44.303815856 -0500
+@@ -1648,7 +1648,7 @@ page_not_uptodate:
+ }
+ EXPORT_SYMBOL(filemap_fault);
+
+-struct vm_operations_struct generic_file_vm_ops = {
++const struct vm_operations_struct generic_file_vm_ops = {
+ .fault = filemap_fault,
+ };
+
+@@ -1659,7 +1659,7 @@ int generic_file_mmap(struct file * file
+ struct address_space *mapping = file->f_mapping;
+
+ if (!mapping->a_ops->readpage)
+- return -ENOEXEC;
++ return -ENODEV;
+ file_accessed(file);
+ vma->vm_ops = &generic_file_vm_ops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+@@ -2019,6 +2019,7 @@ inline int generic_write_checks(struct f
+ *pos = i_size_read(inode);
+
+ if (limit != RLIM_INFINITY) {
++ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
+ if (*pos >= limit) {
+ send_sig(SIGXFSZ, current, 0);
+ return -EFBIG;
+diff -urNp linux-2.6.31.7/mm/filemap_xip.c linux-2.6.31.7/mm/filemap_xip.c
+--- linux-2.6.31.7/mm/filemap_xip.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/filemap_xip.c 2009-12-08 17:39:44.304812245 -0500
+@@ -296,7 +296,7 @@ out:
+ }
+ }
+
+-static struct vm_operations_struct xip_file_vm_ops = {
++static const struct vm_operations_struct xip_file_vm_ops = {
+ .fault = xip_file_fault,
+ };
+
+diff -urNp linux-2.6.31.7/mm/fremap.c linux-2.6.31.7/mm/fremap.c
+--- linux-2.6.31.7/mm/fremap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/fremap.c 2009-12-08 17:39:44.304812245 -0500
+@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
+ retry:
+ vma = find_vma(mm, start);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
++ goto out;
++#endif
++
+ /*
+ * Make sure the vma is shared, that it supports prefaulting,
+ * and that the remapped range is valid and fully within
+diff -urNp linux-2.6.31.7/mm/highmem.c linux-2.6.31.7/mm/highmem.c
+--- linux-2.6.31.7/mm/highmem.c 2009-12-08 17:29:51.640746883 -0500
++++ linux-2.6.31.7/mm/highmem.c 2009-12-08 17:39:44.304812245 -0500
+@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
+ * So no dangers, even with speculative execution.
+ */
+ page = pte_page(pkmap_page_table[i]);
++ pax_open_kernel();
+ pte_clear(&init_mm, (unsigned long)page_address(page),
+ &pkmap_page_table[i]);
+-
++ pax_close_kernel();
+ set_page_address(page, NULL);
+ need_flush = 1;
+ }
+@@ -177,9 +178,11 @@ start:
+ }
+ }
+ vaddr = PKMAP_ADDR(last_pkmap_nr);
++
++ pax_open_kernel();
+ set_pte_at(&init_mm, vaddr,
+ &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+-
++ pax_close_kernel();
+ pkmap_count[last_pkmap_nr] = 1;
+ set_page_address(page, (void *)vaddr);
+
+diff -urNp linux-2.6.31.7/mm/hugetlb.c linux-2.6.31.7/mm/hugetlb.c
+--- linux-2.6.31.7/mm/hugetlb.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/hugetlb.c 2009-12-08 17:39:44.305815009 -0500
+@@ -1689,7 +1689,7 @@ static int hugetlb_vm_op_fault(struct vm
+ return 0;
+ }
+
+-struct vm_operations_struct hugetlb_vm_ops = {
++const struct vm_operations_struct hugetlb_vm_ops = {
+ .fault = hugetlb_vm_op_fault,
+ .open = hugetlb_vm_op_open,
+ .close = hugetlb_vm_op_close,
+@@ -1892,6 +1892,26 @@ static int unmap_ref_private(struct mm_s
+ return 1;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ struct vm_area_struct *vma_m;
++ unsigned long address_m;
++ pte_t *ptep_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
++ get_page(page_m);
++ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
++}
++#endif
++
+ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, pte_t pte,
+ struct page *pagecache_page)
+@@ -1963,6 +1983,11 @@ retry_avoidcopy:
+ huge_ptep_clear_flush(vma, address, ptep);
+ set_huge_pte_at(mm, address, ptep,
+ make_huge_pte(vma, new_page, 1));
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_huge_pte(vma, address, new_page);
++#endif
++
+ /* Make the old page be freed below */
+ new_page = old_page;
+ }
+@@ -2072,6 +2097,10 @@ retry:
+ && (vma->vm_flags & VM_SHARED)));
+ set_huge_pte_at(mm, address, ptep, new_pte);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_huge_pte(vma, address, page);
++#endif
++
+ if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+ /* Optimization, do the COW without a second fault */
+ ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
+@@ -2100,6 +2129,28 @@ int hugetlb_fault(struct mm_struct *mm,
+ static DEFINE_MUTEX(hugetlb_instantiation_mutex);
+ struct hstate *h = hstate_vma(vma);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ unsigned long address_m;
++
++ if (vma->vm_start > vma_m->vm_start) {
++ address_m = address;
++ address -= SEGMEXEC_TASK_SIZE;
++ vma = vma_m;
++ h = hstate_vma(vma);
++ } else
++ address_m = address + SEGMEXEC_TASK_SIZE;
++
++ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
++ return VM_FAULT_OOM;
++ address_m &= HPAGE_MASK;
++ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
++ }
++#endif
++
+ ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+ if (!ptep)
+ return VM_FAULT_OOM;
+diff -urNp linux-2.6.31.7/mm/Kconfig linux-2.6.31.7/mm/Kconfig
+--- linux-2.6.31.7/mm/Kconfig 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/Kconfig 2009-12-08 17:39:44.302815507 -0500
+@@ -216,7 +216,7 @@ config MMU_NOTIFIER
+
+ config DEFAULT_MMAP_MIN_ADDR
+ int "Low address space to protect from user allocation"
+- default 4096
++ default 65536
+ help
+ This is the portion of low virtual memory which should be protected
+ from userspace allocation. Keeping a user from writing to low pages
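
Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 keeps unprivileged userspace from mapping the lowest 64 KiB of the address space, which is what kernel NULL-pointer-dereference exploits rely on. A userspace probe of the running kernel's policy; expect EPERM or EACCES when the configured floor is above the requested address:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    void *want = (void *)0x1000;                    /* 4 KiB: below 64 KiB */
    void *got = mmap(want, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

    if (got == MAP_FAILED)
        printf("low mapping refused: %s\n", strerror(errno));
    else
        printf("low mapping allowed at %p (mmap_min_addr is low)\n", got);
    return 0;
}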
+diff -urNp linux-2.6.31.7/mm/maccess.c linux-2.6.31.7/mm/maccess.c
+--- linux-2.6.31.7/mm/maccess.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/maccess.c 2009-12-08 17:39:44.305815009 -0500
+@@ -14,7 +14,7 @@
+ * Safely read from address @src to the buffer at @dst. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+-long probe_kernel_read(void *dst, void *src, size_t size)
++long probe_kernel_read(void *dst, const void *src, size_t size)
+ {
+ long ret;
+ mm_segment_t old_fs = get_fs();
+@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
++long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
+ {
+ long ret;
+ mm_segment_t old_fs = get_fs();
+diff -urNp linux-2.6.31.7/mm/madvise.c linux-2.6.31.7/mm/madvise.c
+--- linux-2.6.31.7/mm/madvise.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/madvise.c 2009-12-08 17:39:44.305815009 -0500
+@@ -43,6 +43,10 @@ static long madvise_behavior(struct vm_a
+ pgoff_t pgoff;
+ int new_flags = vma->vm_flags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ switch (behavior) {
+ case MADV_NORMAL:
+ new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
+@@ -92,6 +96,13 @@ success:
+ /*
+ * vm_flags is protected by the mmap_sem held in write mode.
+ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m)
++ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
++#endif
++
+ vma->vm_flags = new_flags;
+
+ out:
+@@ -235,6 +246,17 @@ madvise_vma(struct vm_area_struct *vma,
+
+ case MADV_DONTNEED:
+ error = madvise_dontneed(vma, prev, start, end);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!error) {
++ struct vm_area_struct *vma_m, *prev_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m)
++ error = madvise_dontneed(vma_m, &prev_m, start + SEGMEXEC_TASK_SIZE, end + SEGMEXEC_TASK_SIZE);
++ }
++#endif
++
+ break;
+
+ default:
+@@ -328,6 +350,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
+ if (end < start)
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ goto out;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ goto out;
++
+ error = 0;
+ if (end == start)
+ goto out;
+diff -urNp linux-2.6.31.7/mm/memory.c linux-2.6.31.7/mm/memory.c
+--- linux-2.6.31.7/mm/memory.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/memory.c 2009-12-08 17:39:44.306808692 -0500
+@@ -47,6 +47,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/rmap.h>
+ #include <linux/module.h>
++#include <linux/security.h>
+ #include <linux/delayacct.h>
+ #include <linux/init.h>
+ #include <linux/writeback.h>
+@@ -1228,11 +1229,11 @@ int __get_user_pages(struct task_struct
+ vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ i = 0;
+
+- do {
++ while (nr_pages) {
+ struct vm_area_struct *vma;
+ unsigned int foll_flags;
+
+- vma = find_extend_vma(mm, start);
++ vma = find_vma(mm, start);
+ if (!vma && in_gate_area(tsk, start)) {
+ unsigned long pg = start & PAGE_MASK;
+ struct vm_area_struct *gate_vma = get_gate_vma(tsk);
+@@ -1274,7 +1275,7 @@ int __get_user_pages(struct task_struct
+ continue;
+ }
+
+- if (!vma ||
++ if (!vma || start < vma->vm_start ||
+ (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+ (!ignore && !(vm_flags & vma->vm_flags)))
+ return i ? : -EFAULT;
+@@ -1360,7 +1361,7 @@ int __get_user_pages(struct task_struct
+ start += PAGE_SIZE;
+ nr_pages--;
+ } while (nr_pages && start < vma->vm_end);
+- } while (nr_pages);
++ }
+ return i;
+ }
+
+@@ -1926,6 +1927,186 @@ static inline void cow_user_page(struct
+ copy_user_highpage(dst, src, va, vma);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ spinlock_t *ptl;
++ pte_t *pte, entry;
++
++ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++ entry = *pte;
++ if (!pte_present(entry)) {
++ if (!pte_none(entry)) {
++ BUG_ON(pte_file(entry));
++ free_swap_and_cache(pte_to_swp_entry(entry));
++ pte_clear_not_present_full(mm, address, pte, 0);
++ }
++ } else {
++ struct page *page;
++
++ flush_cache_page(vma, address, pte_pfn(entry));
++ entry = ptep_clear_flush(vma, address, pte);
++ BUG_ON(pte_dirty(entry));
++ page = vm_normal_page(vma, address, entry);
++ if (page) {
++ update_hiwater_rss(mm);
++ if (PageAnon(page))
++ dec_mm_counter(mm, anon_rss);
++ else
++ dec_mm_counter(mm, file_rss);
++ page_remove_rmap(page);
++ page_cache_release(page);
++ }
++ }
++ pte_unmap_unlock(pte, ptl);
++}
++
++/* PaX: if vma is mirrored, synchronize the mirror's PTE
++ *
++ * the ptl of the lower mapped page is held on entry and is not released on exit
++ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
++ */
++static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long address_m;
++ spinlock_t *ptl_m;
++ struct vm_area_struct *vma_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m, entry_m;
++
++ BUG_ON(!page_m || !PageAnon(page_m));
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(!PageLocked(page_m));
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++ pte_m = pte_offset_map_nested(pmd_m, address_m);
++ ptl_m = pte_lockptr(mm, pmd_m);
++ if (ptl != ptl_m) {
++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++ if (!pte_none(*pte_m))
++ goto out;
++ }
++
++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
++ page_cache_get(page_m);
++ page_add_anon_rmap(page_m, vma_m, address_m);
++ inc_mm_counter(mm, anon_rss);
++ set_pte_at(mm, address_m, pte_m, entry_m);
++ update_mmu_cache(vma_m, address_m, entry_m);
++out:
++ if (ptl != ptl_m)
++ spin_unlock(ptl_m);
++ pte_unmap_nested(pte_m);
++ unlock_page(page_m);
++}
++
++void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long address_m;
++ spinlock_t *ptl_m;
++ struct vm_area_struct *vma_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m, entry_m;
++
++ BUG_ON(!page_m || PageAnon(page_m));
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++ pte_m = pte_offset_map_nested(pmd_m, address_m);
++ ptl_m = pte_lockptr(mm, pmd_m);
++ if (ptl != ptl_m) {
++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++ if (!pte_none(*pte_m))
++ goto out;
++ }
++
++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
++ page_cache_get(page_m);
++ page_add_file_rmap(page_m);
++ inc_mm_counter(mm, file_rss);
++ set_pte_at(mm, address_m, pte_m, entry_m);
++ update_mmu_cache(vma_m, address_m, entry_m);
++out:
++ if (ptl != ptl_m)
++ spin_unlock(ptl_m);
++ pte_unmap_nested(pte_m);
++}
++
++static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long address_m;
++ spinlock_t *ptl_m;
++ struct vm_area_struct *vma_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m, entry_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++ pte_m = pte_offset_map_nested(pmd_m, address_m);
++ ptl_m = pte_lockptr(mm, pmd_m);
++ if (ptl != ptl_m) {
++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++ if (!pte_none(*pte_m))
++ goto out;
++ }
++
++ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
++ set_pte_at(mm, address_m, pte_m, entry_m);
++out:
++ if (ptl != ptl_m)
++ spin_unlock(ptl_m);
++ pte_unmap_nested(pte_m);
++}
++
++static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
++{
++ struct page *page_m;
++ pte_t entry;
++
++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
++ goto out;
++
++ entry = *pte;
++ page_m = vm_normal_page(vma, address, entry);
++ if (!page_m)
++ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
++ else if (PageAnon(page_m)) {
++ if (pax_find_mirror_vma(vma)) {
++ pte_unmap_unlock(pte, ptl);
++ lock_page(page_m);
++ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
++ if (pte_same(entry, *pte))
++ pax_mirror_anon_pte(vma, address, page_m, ptl);
++ else
++ unlock_page(page_m);
++ }
++ } else
++ pax_mirror_file_pte(vma, address, page_m, ptl);
++
++out:
++ pte_unmap_unlock(pte, ptl);
++}
++#endif
++
+ /*
+ * This routine handles present pages, when users try to write
+ * to a shared page. It is done by copying the page to a new address
+@@ -2098,6 +2279,12 @@ gotten:
+ */
+ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+ if (likely(pte_same(*page_table, orig_pte))) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_find_mirror_vma(vma))
++ BUG_ON(!trylock_page(new_page));
++#endif
++
+ if (old_page) {
+ if (!PageAnon(old_page)) {
+ dec_mm_counter(mm, file_rss);
+@@ -2144,6 +2331,10 @@ gotten:
+ page_remove_rmap(old_page);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_anon_pte(vma, address, new_page, ptl);
++#endif
++
+ /* Free the old page.. */
+ new_page = old_page;
+ ret |= VM_FAULT_WRITE;
+@@ -2425,6 +2616,7 @@ int vmtruncate(struct inode * inode, lof
+ unsigned long limit;
+
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
++ gr_learn_resource(current, RLIMIT_FSIZE, offset, 1);
+ if (limit != RLIM_INFINITY && offset > limit)
+ goto out_sig;
+ if (offset > inode->i_sb->s_maxbytes)
+@@ -2587,6 +2779,11 @@ static int do_swap_page(struct mm_struct
+ swap_free(entry);
+ if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+ try_to_free_swap(page);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
++#endif
++
+ unlock_page(page);
+
+ if (flags & FAULT_FLAG_WRITE) {
+@@ -2598,6 +2795,11 @@ static int do_swap_page(struct mm_struct
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, pte);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_anon_pte(vma, address, page, ptl);
++#endif
++
+ unlock:
+ pte_unmap_unlock(page_table, ptl);
+ out:
+@@ -2644,12 +2846,23 @@ static int do_anonymous_page(struct mm_s
+ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+ if (!pte_none(*page_table))
+ goto release;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_find_mirror_vma(vma))
++ BUG_ON(!trylock_page(page));
++#endif
++
+ inc_mm_counter(mm, anon_rss);
+ page_add_new_anon_rmap(page, vma, address);
+ set_pte_at(mm, address, page_table, entry);
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, entry);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_anon_pte(vma, address, page, ptl);
++#endif
++
+ unlock:
+ pte_unmap_unlock(page_table, ptl);
+ return 0;
+@@ -2786,6 +2999,12 @@ static int __do_fault(struct mm_struct *
+ */
+ /* Only go through if we didn't race with anybody else... */
+ if (likely(pte_same(*page_table, orig_pte))) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (anon && pax_find_mirror_vma(vma))
++ BUG_ON(!trylock_page(page));
++#endif
++
+ flush_icache_page(vma, page);
+ entry = mk_pte(page, vma->vm_page_prot);
+ if (flags & FAULT_FLAG_WRITE)
+@@ -2805,6 +3024,14 @@ static int __do_fault(struct mm_struct *
+
+ /* no need to invalidate: a not-present page won't be cached */
+ update_mmu_cache(vma, address, entry);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (anon)
++ pax_mirror_anon_pte(vma, address, page, ptl);
++ else
++ pax_mirror_file_pte(vma, address, page, ptl);
++#endif
++
+ } else {
+ if (charged)
+ mem_cgroup_uncharge_page(page);
+@@ -2952,6 +3179,12 @@ static inline int handle_pte_fault(struc
+ if (flags & FAULT_FLAG_WRITE)
+ flush_tlb_page(vma, address);
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_pte(vma, address, pte, pmd, ptl);
++ return 0;
++#endif
++
+ unlock:
+ pte_unmap_unlock(pte, ptl);
+ return 0;
+@@ -2968,6 +3201,10 @@ int handle_mm_fault(struct mm_struct *mm
+ pmd_t *pmd;
+ pte_t *pte;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ __set_current_state(TASK_RUNNING);
+
+ count_vm_event(PGFAULT);
+@@ -2975,6 +3212,34 @@ int handle_mm_fault(struct mm_struct *mm
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ return hugetlb_fault(mm, vma, address, flags);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ unsigned long address_m;
++ pgd_t *pgd_m;
++ pud_t *pud_m;
++ pmd_t *pmd_m;
++
++ if (vma->vm_start > vma_m->vm_start) {
++ address_m = address;
++ address -= SEGMEXEC_TASK_SIZE;
++ vma = vma_m;
++ } else
++ address_m = address + SEGMEXEC_TASK_SIZE;
++
++ pgd_m = pgd_offset(mm, address_m);
++ pud_m = pud_alloc(mm, pgd_m, address_m);
++ if (!pud_m)
++ return VM_FAULT_OOM;
++ pmd_m = pmd_alloc(mm, pud_m, address_m);
++ if (!pmd_m)
++ return VM_FAULT_OOM;
++ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
++ return VM_FAULT_OOM;
++ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
++ }
++#endif
++
+ pgd = pgd_offset(mm, address);
+ pud = pud_alloc(mm, pgd, address);
+ if (!pud)
+@@ -3072,7 +3337,7 @@ static int __init gate_vma_init(void)
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+- gate_vma.vm_page_prot = __P101;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+ /*
+ * Make sure the vDSO gets into every core dump.
+ * Dumping its contents makes post-mortem fully interpretable later
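The memory.c hunks above replay anonymous, file and pfn faults at a mirror address one SEGMEXEC_TASK_SIZE above the faulting address, after checking that the fault itself lies in the lower half. A small standalone C sketch of that arithmetic, for reference only; the SEGMEXEC_TASK_SIZE value and the helper name are assumptions for illustration, not taken from the patch:

/*
 * Sketch of the address mirroring used by the pax_mirror_*_pte() helpers:
 * a fault at `address` in the lower half is replayed at
 * `address + SEGMEXEC_TASK_SIZE` in the upper half.
 */
#include <assert.h>
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL        /* assumed split point, for illustration */

static unsigned long mirror_address(unsigned long address)
{
        /* mirrored faults are only ever taken in the lower half */
        assert(address < SEGMEXEC_TASK_SIZE);
        return address + SEGMEXEC_TASK_SIZE;
}

int main(void)
{
        unsigned long addr = 0x08048000UL;     /* hypothetical text address */

        printf("data view: %#lx  exec view: %#lx\n", addr, mirror_address(addr));
        return 0;
}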
+diff -urNp linux-2.6.31.7/mm/mempolicy.c linux-2.6.31.7/mm/mempolicy.c
+--- linux-2.6.31.7/mm/mempolicy.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/mempolicy.c 2009-12-08 17:39:44.307783535 -0500
+@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
+ struct vm_area_struct *next;
+ int err;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ err = 0;
+ for (; vma && vma->vm_start < end; vma = next) {
+ next = vma->vm_next;
+@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
+ err = policy_vma(vma, new);
+ if (err)
+ break;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ err = policy_vma(vma_m, new);
++ if (err)
++ break;
++ }
++#endif
++
+ }
+ return err;
+ }
+@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
+
+ if (end < start)
+ return -EINVAL;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ if (end == start)
+ return 0;
+
+@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
+ if (!mm)
+ return -EINVAL;
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (mm != current->mm &&
++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
++ err = -EPERM;
++ goto out;
++ }
++#endif
++
+ /*
+ * Check if this process has the right to modify the specified
+ * process. The right exists if the process has administrative
+@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
+ rcu_read_lock();
+ tcred = __task_cred(task);
+ if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+- cred->uid != tcred->suid && cred->uid != tcred->uid &&
+- !capable(CAP_SYS_NICE)) {
++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
+ rcu_read_unlock();
+ err = -EPERM;
+ goto out;
+@@ -2386,7 +2418,7 @@ int show_numa_map(struct seq_file *m, vo
+
+ if (file) {
+ seq_printf(m, " file=");
+- seq_path(m, &file->f_path, "\n\t= ");
++ seq_path(m, &file->f_path, "\n\t\\= ");
+ } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+ seq_printf(m, " heap");
+ } else if (vma->vm_start <= mm->start_stack &&
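The mbind_range() hunk above repeats the policy update on the mirror vma whenever one exists. A hedged, standalone sketch of that "apply to the object and to its mirror" control flow; the struct and function names here are invented for illustration and only the flow mirrors the patch:

#include <stddef.h>
#include <stdio.h>

struct fake_vma {
        unsigned long start, end;
        struct fake_vma *mirror;        /* NULL when no mirror exists */
};

static int apply_policy(struct fake_vma *vma, int policy)
{
        printf("policy %d -> [%#lx, %#lx)\n", policy, vma->start, vma->end);
        return 0;
}

static int apply_policy_mirrored(struct fake_vma *vma, int policy)
{
        int err = apply_policy(vma, policy);

        if (!err && vma->mirror)        /* repeat the operation on the mirror */
                err = apply_policy(vma->mirror, policy);
        return err;
}

int main(void)
{
        struct fake_vma exec_m = { 0x60000000UL, 0x60001000UL, NULL };
        struct fake_vma exec   = { 0x00000000UL, 0x00001000UL, &exec_m };

        return apply_policy_mirrored(&exec, 1);
}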
+diff -urNp linux-2.6.31.7/mm/migrate.c linux-2.6.31.7/mm/migrate.c
+--- linux-2.6.31.7/mm/migrate.c 2009-12-08 17:29:51.641741415 -0500
++++ linux-2.6.31.7/mm/migrate.c 2009-12-08 17:39:44.307783535 -0500
+@@ -1087,6 +1087,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
+ if (!mm)
+ return -EINVAL;
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (mm != current->mm &&
++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
++ err = -EPERM;
++ goto out;
++ }
++#endif
++
+ /*
+ * Check if this process has the right to modify the specified
+ * process. The right exists if the process has administrative
+@@ -1096,8 +1104,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
+ rcu_read_lock();
+ tcred = __task_cred(task);
+ if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+- cred->uid != tcred->suid && cred->uid != tcred->uid &&
+- !capable(CAP_SYS_NICE)) {
++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
+ rcu_read_unlock();
+ err = -EPERM;
+ goto out;
+diff -urNp linux-2.6.31.7/mm/mlock.c linux-2.6.31.7/mm/mlock.c
+--- linux-2.6.31.7/mm/mlock.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/mlock.c 2009-12-08 17:39:44.308813012 -0500
+@@ -13,6 +13,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/mempolicy.h>
+ #include <linux/syscalls.h>
++#include <linux/security.h>
+ #include <linux/sched.h>
+ #include <linux/module.h>
+ #include <linux/rmap.h>
+@@ -412,6 +413,17 @@ static int do_mlock(unsigned long start,
+ return -EINVAL;
+ if (end == start)
+ return 0;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ vma = find_vma_prev(current->mm, start, &prev);
+ if (!vma || vma->vm_start > start)
+ return -ENOMEM;
+@@ -471,6 +483,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
+ lock_limit >>= PAGE_SHIFT;
+
+ /* check against resource limits */
++ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
+ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
+ error = do_mlock(start, len, 1);
+ up_write(&current->mm->mmap_sem);
+@@ -492,10 +505,10 @@ SYSCALL_DEFINE2(munlock, unsigned long,
+ static int do_mlockall(int flags)
+ {
+ struct vm_area_struct * vma, * prev = NULL;
+- unsigned int def_flags = 0;
++ unsigned int def_flags = current->mm->def_flags & ~VM_LOCKED;
+
+ if (flags & MCL_FUTURE)
+- def_flags = VM_LOCKED;
++ def_flags |= VM_LOCKED;
+ current->mm->def_flags = def_flags;
+ if (flags == MCL_FUTURE)
+ goto out;
+@@ -503,6 +516,12 @@ static int do_mlockall(int flags)
+ for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
+ unsigned int newflags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
++ break;
++#endif
++
++ BUG_ON(vma->vm_end > TASK_SIZE);
+ newflags = vma->vm_flags | VM_LOCKED;
+ if (!(flags & MCL_CURRENT))
+ newflags &= ~VM_LOCKED;
+@@ -534,6 +553,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+ lock_limit >>= PAGE_SHIFT;
+
+ ret = -ENOMEM;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1);
+ if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
+ capable(CAP_IPC_LOCK))
+ ret = do_mlockall(flags);
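The do_mlockall() hunk above stops overwriting def_flags wholesale: only VM_LOCKED is cleared, then set again when MCL_FUTURE is requested, so unrelated default flags survive. A minimal sketch of that bit handling, with placeholder flag values rather than the kernel's:

#include <stdio.h>

#define VM_LOCKED  0x2000u      /* placeholder bit values */
#define MCL_FUTURE 0x2u

static unsigned int update_def_flags(unsigned int def_flags, int mcl_flags)
{
        def_flags &= ~VM_LOCKED;                /* drop only the lock bit */
        if (mcl_flags & MCL_FUTURE)
                def_flags |= VM_LOCKED;         /* re-add it when requested */
        return def_flags;
}

int main(void)
{
        unsigned int flags = 0x0100u | VM_LOCKED;       /* some unrelated bit set */

        printf("%#x -> %#x\n", flags, update_def_flags(flags, 0));
        printf("%#x -> %#x\n", flags, update_def_flags(flags, MCL_FUTURE));
        return 0;
}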
+diff -urNp linux-2.6.31.7/mm/mmap.c linux-2.6.31.7/mm/mmap.c
+--- linux-2.6.31.7/mm/mmap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/mmap.c 2009-12-08 17:39:44.309812227 -0500
+@@ -45,6 +45,16 @@
+ #define arch_rebalance_pgtables(addr, len) (addr)
+ #endif
+
++static inline void verify_mm_writelocked(struct mm_struct *mm)
++{
++#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
++ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
++ up_read(&mm->mmap_sem);
++ BUG();
++ }
++#endif
++}
++
+ static void unmap_region(struct mm_struct *mm,
+ struct vm_area_struct *vma, struct vm_area_struct *prev,
+ unsigned long start, unsigned long end);
+@@ -70,16 +80,25 @@ static void unmap_region(struct mm_struc
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ */
+-pgprot_t protection_map[16] = {
++pgprot_t protection_map[16] __read_only = {
+ __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+ __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+ };
+
+ pgprot_t vm_get_page_prot(unsigned long vm_flags)
+ {
+- return __pgprot(pgprot_val(protection_map[vm_flags &
++ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+ pgprot_val(arch_vm_get_page_prot(vm_flags)));
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++ if (!nx_enabled &&
++ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
++ (vm_flags & (VM_READ | VM_WRITE)))
++ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
++#endif
++
++ return prot;
+ }
+ EXPORT_SYMBOL(vm_get_page_prot);
+
+@@ -231,6 +250,7 @@ static struct vm_area_struct *remove_vma
+ struct vm_area_struct *next = vma->vm_next;
+
+ might_sleep();
++ BUG_ON(vma->vm_mirror);
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+ if (vma->vm_file) {
+@@ -267,6 +287,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+ * not page aligned -Ram Gupta
+ */
+ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
++ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
+ if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
+ (mm->end_data - mm->start_data) > rlim)
+ goto out;
+@@ -696,6 +717,12 @@ static int
+ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
++ return 0;
++#endif
++
+ if (is_mergeable_vma(vma, file, vm_flags) &&
+ is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
+ if (vma->vm_pgoff == vm_pgoff)
+@@ -715,6 +742,12 @@ static int
+ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
++ return 0;
++#endif
++
+ if (is_mergeable_vma(vma, file, vm_flags) &&
+ is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
+ pgoff_t vm_pglen;
+@@ -757,12 +790,19 @@ can_vma_merge_after(struct vm_area_struc
+ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+ struct vm_area_struct *prev, unsigned long addr,
+ unsigned long end, unsigned long vm_flags,
+- struct anon_vma *anon_vma, struct file *file,
++ struct anon_vma *anon_vma, struct file *file,
+ pgoff_t pgoff, struct mempolicy *policy)
+ {
+ pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
+ struct vm_area_struct *area, *next;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
++ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
++
++ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
++#endif
++
+ /*
+ * We later require that vma->vm_flags == vm_flags,
+ * so this tests vma->vm_flags & VM_SPECIAL, too.
+@@ -778,6 +818,15 @@ struct vm_area_struct *vma_merge(struct
+ if (next && next->vm_end == end) /* cases 6, 7, 8 */
+ next = next->vm_next;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (prev)
++ prev_m = pax_find_mirror_vma(prev);
++ if (area)
++ area_m = pax_find_mirror_vma(area);
++ if (next)
++ next_m = pax_find_mirror_vma(next);
++#endif
++
+ /*
+ * Can it merge with the predecessor?
+ */
+@@ -797,9 +846,24 @@ struct vm_area_struct *vma_merge(struct
+ /* cases 1, 6 */
+ vma_adjust(prev, prev->vm_start,
+ next->vm_end, prev->vm_pgoff, NULL);
+- } else /* cases 2, 5, 7 */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (prev_m)
++ vma_adjust(prev_m, prev_m->vm_start,
++ next_m->vm_end, prev_m->vm_pgoff, NULL);
++#endif
++
++ } else { /* cases 2, 5, 7 */
+ vma_adjust(prev, prev->vm_start,
+ end, prev->vm_pgoff, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (prev_m)
++ vma_adjust(prev_m, prev_m->vm_start,
++ end_m, prev_m->vm_pgoff, NULL);
++#endif
++
++ }
+ return prev;
+ }
+
+@@ -810,12 +874,27 @@ struct vm_area_struct *vma_merge(struct
+ mpol_equal(policy, vma_policy(next)) &&
+ can_vma_merge_before(next, vm_flags,
+ anon_vma, file, pgoff+pglen)) {
+- if (prev && addr < prev->vm_end) /* case 4 */
++ if (prev && addr < prev->vm_end) { /* case 4 */
+ vma_adjust(prev, prev->vm_start,
+ addr, prev->vm_pgoff, NULL);
+- else /* cases 3, 8 */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (prev_m)
++ vma_adjust(prev_m, prev_m->vm_start,
++ addr_m, prev_m->vm_pgoff, NULL);
++#endif
++
++ } else { /* cases 3, 8 */
+ vma_adjust(area, addr, next->vm_end,
+ next->vm_pgoff - pglen, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (area_m)
++ vma_adjust(area_m, addr_m, next_m->vm_end,
++ next_m->vm_pgoff - pglen, NULL);
++#endif
++
++ }
+ return area;
+ }
+
+@@ -890,14 +969,11 @@ none:
+ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
+ struct file *file, long pages)
+ {
+- const unsigned long stack_flags
+- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
+-
+ if (file) {
+ mm->shared_vm += pages;
+ if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
+ mm->exec_vm += pages;
+- } else if (flags & stack_flags)
++ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
+ mm->stack_vm += pages;
+ if (flags & (VM_RESERVED|VM_IO))
+ mm->reserved_vm += pages;
+@@ -924,7 +1000,7 @@ unsigned long do_mmap_pgoff(struct file
+ * (the exception is when the underlying filesystem is noexec
+ * mounted, in which case we dont add PROT_EXEC.)
+ */
+- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
+ if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
+ prot |= PROT_EXEC;
+
+@@ -934,15 +1010,15 @@ unsigned long do_mmap_pgoff(struct file
+ if (!(flags & MAP_FIXED))
+ addr = round_hint_to_min(addr);
+
+- error = arch_mmap_check(addr, len, flags);
+- if (error)
+- return error;
+-
+ /* Careful about overflows.. */
+ len = PAGE_ALIGN(len);
+ if (!len || len > TASK_SIZE)
+ return -ENOMEM;
+
++ error = arch_mmap_check(addr, len, flags);
++ if (error)
++ return error;
++
+ /* offset overflow? */
+ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
+ return -EOVERFLOW;
+@@ -954,7 +1030,7 @@ unsigned long do_mmap_pgoff(struct file
+ /* Obtain the address to map to. we verify (or select) it and ensure
+ * that it represents a valid section of the address space.
+ */
+- addr = get_unmapped_area(file, addr, len, pgoff, flags);
++ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
+ if (addr & ~PAGE_MASK)
+ return addr;
+
+@@ -965,6 +1041,26 @@ unsigned long do_mmap_pgoff(struct file
+ vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
+ mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT) {
++ if ((prot & (PROT_WRITE | PROT_EXEC)) != PROT_EXEC)
++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++ else
++ vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
++ }
++#endif
++
++ }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
++ vm_flags &= ~VM_PAGEEXEC;
++#endif
++
+ if (flags & MAP_LOCKED) {
+ if (!can_do_mlock())
+ return -EPERM;
+@@ -978,6 +1074,7 @@ unsigned long do_mmap_pgoff(struct file
+ locked += mm->locked_vm;
+ lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ lock_limit >>= PAGE_SHIFT;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ return -EAGAIN;
+ }
+@@ -1051,6 +1148,9 @@ unsigned long do_mmap_pgoff(struct file
+ if (error)
+ return error;
+
++ if (!gr_acl_handle_mmap(file, prot))
++ return -EACCES;
++
+ return mmap_region(file, addr, len, flags, vm_flags, pgoff);
+ }
+ EXPORT_SYMBOL(do_mmap_pgoff);
+@@ -1063,10 +1163,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
+ */
+ int vma_wants_writenotify(struct vm_area_struct *vma)
+ {
+- unsigned int vm_flags = vma->vm_flags;
++ unsigned long vm_flags = vma->vm_flags;
+
+ /* If it was private or non-writable, the write bit is already clear */
+- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
++ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
+ return 0;
+
+ /* The backer wishes to know when pages are first written to? */
+@@ -1115,14 +1215,24 @@ unsigned long mmap_region(struct file *f
+ unsigned long charged = 0;
+ struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++#endif
++
++ /*
++ * mm->mmap_sem is required to protect against another thread
++ * changing the mappings in case we sleep.
++ */
++ verify_mm_writelocked(mm);
++
+ /* Clear old maps */
+ error = -ENOMEM;
+-munmap_back:
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ if (vma && vma->vm_start < addr + len) {
+ if (do_munmap(mm, addr, len))
+ return -ENOMEM;
+- goto munmap_back;
++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
++ BUG_ON(vma && vma->vm_start < addr + len);
+ }
+
+ /* Check against address space limit. */
+@@ -1171,6 +1281,16 @@ munmap_back:
+ goto unacct_error;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m) {
++ error = -ENOMEM;
++ goto free_vma;
++ }
++ }
++#endif
++
+ vma->vm_mm = mm;
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+@@ -1193,6 +1313,19 @@ munmap_back:
+ error = file->f_op->mmap(file, vma);
+ if (error)
+ goto unmap_and_free_vma;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m && (vm_flags & VM_EXECUTABLE))
++ added_exe_file_vma(mm);
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
++ vma->vm_flags |= VM_PAGEEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ }
++#endif
++
+ if (vm_flags & VM_EXECUTABLE)
+ added_exe_file_vma(mm);
+ } else if (vm_flags & VM_SHARED) {
+@@ -1216,6 +1349,11 @@ munmap_back:
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ file = vma->vm_file;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m)
++ pax_mirror_vma(vma_m, vma);
++#endif
++
+ /* Once vma denies write, undo our temporary denial count */
+ if (correct_wcount)
+ atomic_inc(&inode->i_writecount);
+@@ -1224,6 +1362,7 @@ out:
+
+ mm->total_vm += len >> PAGE_SHIFT;
+ vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
++ track_exec_limit(mm, addr, addr + len, vm_flags);
+ if (vm_flags & VM_LOCKED) {
+ /*
+ * makes pages present; downgrades, drops, reacquires mmap_sem
+@@ -1246,6 +1385,12 @@ unmap_and_free_vma:
+ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
+ charged = 0;
+ free_vma:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m)
++ kmem_cache_free(vm_area_cachep, vma_m);
++#endif
++
+ kmem_cache_free(vm_area_cachep, vma);
+ unacct_error:
+ if (charged)
+@@ -1279,6 +1424,10 @@ arch_get_unmapped_area(struct file *filp
+ if (flags & MAP_FIXED)
+ return addr;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+@@ -1287,10 +1436,10 @@ arch_get_unmapped_area(struct file *filp
+ return addr;
+ }
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -1301,9 +1450,8 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- addr = TASK_UNMAPPED_BASE;
+- start_addr = addr;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+@@ -1325,10 +1473,16 @@ full_search:
+
+ void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
++ return;
++#endif
++
+ /*
+ * Is this a new hole at the lowest possible address?
+ */
+- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
++ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
+ mm->free_area_cache = addr;
+ mm->cached_hole_size = ~0UL;
+ }
+@@ -1346,7 +1500,7 @@ arch_get_unmapped_area_topdown(struct fi
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+- unsigned long addr = addr0;
++ unsigned long base = mm->mmap_base, addr = addr0;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+@@ -1355,6 +1509,10 @@ arch_get_unmapped_area_topdown(struct fi
+ if (flags & MAP_FIXED)
+ return addr;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+@@ -1412,13 +1570,21 @@ bottomup:
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+- mm->free_area_cache = mm->mmap_base;
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
+@@ -1427,6 +1593,12 @@ bottomup:
+
+ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
++ return;
++#endif
++
+ /*
+ * Is this a new hole at the highest possible address?
+ */
+@@ -1434,8 +1606,10 @@ void arch_unmap_area_topdown(struct mm_s
+ mm->free_area_cache = addr;
+
+ /* dont allow allocations above current base */
+- if (mm->free_area_cache > mm->mmap_base)
++ if (mm->free_area_cache > mm->mmap_base) {
+ mm->free_area_cache = mm->mmap_base;
++ mm->cached_hole_size = ~0UL;
++ }
+ }
+
+ unsigned long
+@@ -1535,6 +1709,27 @@ out:
+ return prev ? prev->vm_next : vma;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
++{
++ struct vm_area_struct *vma_m;
++
++ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
++ BUG_ON(vma->vm_mirror);
++ return NULL;
++ }
++ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
++ vma_m = vma->vm_mirror;
++ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
++ BUG_ON(vma->vm_file != vma_m->vm_file);
++ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
++ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
++ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
++ return vma_m;
++}
++#endif
++
+ /*
+ * Verify that the stack growth is acceptable and
+ * update accounting. This is shared with both the
+@@ -1551,6 +1746,7 @@ static int acct_stack_growth(struct vm_a
+ return -ENOMEM;
+
+ /* Stack limit test */
++ gr_learn_resource(current, RLIMIT_STACK, size, 1);
+ if (size > rlim[RLIMIT_STACK].rlim_cur)
+ return -ENOMEM;
+
+@@ -1560,6 +1756,7 @@ static int acct_stack_growth(struct vm_a
+ unsigned long limit;
+ locked = mm->locked_vm + grow;
+ limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
+ if (locked > limit && !capable(CAP_IPC_LOCK))
+ return -ENOMEM;
+ }
+@@ -1595,35 +1792,40 @@ static
+ #endif
+ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ {
+- int error;
++ int error, locknext;
+
+ if (!(vma->vm_flags & VM_GROWSUP))
+ return -EFAULT;
+
++ /* Also guard against wrapping around to address 0. */
++ if (address < PAGE_ALIGN(address+1))
++ address = PAGE_ALIGN(address+1);
++ else
++ return -ENOMEM;
++
+ /*
+ * We must make sure the anon_vma is allocated
+ * so that the anon_vma locking is not a noop.
+ */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
++ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
++ if (locknext && unlikely(anon_vma_prepare(vma->vm_next)))
++ return -ENOMEM;
+ anon_vma_lock(vma);
++ if (locknext)
++ anon_vma_lock(vma->vm_next);
+
+ /*
+ * vma->vm_start/vm_end cannot change under us because the caller
+ * is required to hold the mmap_sem in read mode. We need the
+- * anon_vma lock to serialize against concurrent expand_stacks.
+- * Also guard against wrapping around to address 0.
++ * anon_vma locks to serialize against concurrent expand_stacks
++ * and expand_upwards.
+ */
+- if (address < PAGE_ALIGN(address+4))
+- address = PAGE_ALIGN(address+4);
+- else {
+- anon_vma_unlock(vma);
+- return -ENOMEM;
+- }
+ error = 0;
+
+ /* Somebody else might have raced and expanded it already */
+- if (address > vma->vm_end) {
++ if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
+ unsigned long size, grow;
+
+ size = address - vma->vm_start;
+@@ -1633,6 +1835,8 @@ int expand_upwards(struct vm_area_struct
+ if (!error)
+ vma->vm_end = address;
+ }
++ if (locknext)
++ anon_vma_unlock(vma->vm_next);
+ anon_vma_unlock(vma);
+ return error;
+ }
+@@ -1644,7 +1848,8 @@ int expand_upwards(struct vm_area_struct
+ static int expand_downwards(struct vm_area_struct *vma,
+ unsigned long address)
+ {
+- int error;
++ int error, lockprev = 0;
++ struct vm_area_struct *prev = NULL;
+
+ /*
+ * We must make sure the anon_vma is allocated
+@@ -1658,6 +1863,15 @@ static int expand_downwards(struct vm_ar
+ if (error)
+ return error;
+
++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
++ find_vma_prev(vma->vm_mm, address, &prev);
++ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
++#endif
++ if (lockprev && unlikely(anon_vma_prepare(prev)))
++ return -ENOMEM;
++ if (lockprev)
++ anon_vma_lock(prev);
++
+ anon_vma_lock(vma);
+
+ /*
+@@ -1667,9 +1881,15 @@ static int expand_downwards(struct vm_ar
+ */
+
+ /* Somebody else might have raced and expanded it already */
+- if (address < vma->vm_start) {
++ if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
+ unsigned long size, grow;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++#endif
++
+ size = vma->vm_end - address;
+ grow = (vma->vm_start - address) >> PAGE_SHIFT;
+
+@@ -1677,9 +1897,20 @@ static int expand_downwards(struct vm_ar
+ if (!error) {
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
++ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m) {
++ vma_m->vm_start -= grow << PAGE_SHIFT;
++ vma_m->vm_pgoff -= grow;
++ }
++#endif
++
+ }
+ }
+ anon_vma_unlock(vma);
++ if (lockprev)
++ anon_vma_unlock(prev);
+ return error;
+ }
+
+@@ -1755,6 +1986,13 @@ static void remove_vma_list(struct mm_st
+ do {
+ long nrpages = vma_pages(vma);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
++ vma = remove_vma(vma);
++ continue;
++ }
++#endif
++
+ mm->total_vm -= nrpages;
+ vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
+ vma = remove_vma(vma);
+@@ -1799,6 +2037,16 @@ detach_vmas_to_be_unmapped(struct mm_str
+
+ insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+ do {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma->vm_mirror) {
++ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
++ vma->vm_mirror->vm_mirror = NULL;
++ vma->vm_mirror->vm_flags &= ~VM_EXEC;
++ vma->vm_mirror = NULL;
++ }
++#endif
++
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ mm->map_count--;
+ tail_vma = vma;
+@@ -1818,6 +2066,108 @@ detach_vmas_to_be_unmapped(struct mm_str
+ * Split a vma into two pieces at address 'addr', a new vma is allocated
+ * either for the first part or the tail.
+ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
++ unsigned long addr, int new_below)
++{
++ struct mempolicy *pol;
++ struct vm_area_struct *new, *vma_m, *new_m = NULL;
++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
++
++ if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK))
++ return -EINVAL;
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
++ if (mm->map_count >= sysctl_max_map_count-1)
++ return -ENOMEM;
++ } else if (mm->map_count >= sysctl_max_map_count)
++ return -ENOMEM;
++
++ new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
++ if (!new)
++ return -ENOMEM;
++
++ if (vma_m) {
++ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
++ if (!new_m) {
++ kmem_cache_free(vm_area_cachep, new);
++ return -ENOMEM;
++ }
++ }
++
++ /* most fields are the same, copy all, and then fixup */
++ *new = *vma;
++
++ if (new_below)
++ new->vm_end = addr;
++ else {
++ new->vm_start = addr;
++ new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
++ }
++
++ if (vma_m) {
++ *new_m = *vma_m;
++ new_m->vm_mirror = new;
++ new->vm_mirror = new_m;
++
++ if (new_below)
++ new_m->vm_end = addr_m;
++ else {
++ new_m->vm_start = addr_m;
++ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
++ }
++ }
++
++ pol = mpol_dup(vma_policy(vma));
++ if (IS_ERR(pol)) {
++ if (new_m)
++ kmem_cache_free(vm_area_cachep, new_m);
++ kmem_cache_free(vm_area_cachep, new);
++ return PTR_ERR(pol);
++ }
++ vma_set_policy(new, pol);
++
++ if (new->vm_file) {
++ get_file(new->vm_file);
++ if (vma->vm_flags & VM_EXECUTABLE)
++ added_exe_file_vma(mm);
++ }
++
++ if (new->vm_ops && new->vm_ops->open)
++ new->vm_ops->open(new);
++
++ if (new_below)
++ vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
++ ((addr - new->vm_start) >> PAGE_SHIFT), new);
++ else
++ vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
++
++ if (vma_m) {
++ mpol_get(pol);
++ vma_set_policy(new_m, pol);
++
++ if (new_m->vm_file) {
++ get_file(new_m->vm_file);
++ if (vma_m->vm_flags & VM_EXECUTABLE)
++ added_exe_file_vma(mm);
++ }
++
++ if (new_m->vm_ops && new_m->vm_ops->open)
++ new_m->vm_ops->open(new_m);
++
++ if (new_below)
++ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
++ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
++ else
++ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
++ }
++
++ return 0;
++}
++#else
+ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+ unsigned long addr, int new_below)
+ {
+@@ -1869,17 +2219,37 @@ int split_vma(struct mm_struct * mm, str
+
+ return 0;
+ }
++#endif
+
+ /* Munmap is split into 2 main parts -- this part which finds
+ * what needs doing, and the areas themselves, which do the
+ * work. This now handles partial unmappings.
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
++#ifdef CONFIG_PAX_SEGMEXEC
+ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+ {
++ int ret = __do_munmap(mm, start, len);
++ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
++ return ret;
++
++ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
++}
++
++int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++#else
++int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++#endif
++{
+ unsigned long end;
+ struct vm_area_struct *vma, *prev, *last;
+
++ /*
++ * mm->mmap_sem is required to protect against another thread
++ * changing the mappings in case we sleep.
++ */
++ verify_mm_writelocked(mm);
++
+ if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
+ return -EINVAL;
+
+@@ -1943,6 +2313,8 @@ int do_munmap(struct mm_struct *mm, unsi
+ /* Fix up all other VM information */
+ remove_vma_list(mm, vma);
+
++ track_exec_limit(mm, start, end, 0UL);
++
+ return 0;
+ }
+
+@@ -1955,22 +2327,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
+
+ profile_munmap(addr);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
++ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
++ return -EINVAL;
++#endif
++
+ down_write(&mm->mmap_sem);
+ ret = do_munmap(mm, addr, len);
+ up_write(&mm->mmap_sem);
+ return ret;
+ }
+
+-static inline void verify_mm_writelocked(struct mm_struct *mm)
+-{
+-#ifdef CONFIG_DEBUG_VM
+- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
+- WARN_ON(1);
+- up_read(&mm->mmap_sem);
+- }
+-#endif
+-}
+-
+ /*
+ * this is really a simplified "do_mmap". it only handles
+ * anonymous maps. eventually we may be able to do some
+@@ -1984,6 +2352,11 @@ unsigned long do_brk(unsigned long addr,
+ struct rb_node ** rb_link, * rb_parent;
+ pgoff_t pgoff = addr >> PAGE_SHIFT;
+ int error;
++ unsigned long charged;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++#endif
+
+ len = PAGE_ALIGN(len);
+ if (!len)
+@@ -2001,19 +2374,34 @@ unsigned long do_brk(unsigned long addr,
+
+ flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT)
++ flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ error = arch_mmap_check(addr, len, flags);
+ if (error)
+ return error;
+
++ charged = len >> PAGE_SHIFT;
++
+ /*
+ * mlock MCL_FUTURE?
+ */
+ if (mm->def_flags & VM_LOCKED) {
+ unsigned long locked, lock_limit;
+- locked = len >> PAGE_SHIFT;
++ locked = charged;
+ locked += mm->locked_vm;
+ lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ lock_limit >>= PAGE_SHIFT;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ return -EAGAIN;
+ }
+@@ -2027,22 +2415,22 @@ unsigned long do_brk(unsigned long addr,
+ /*
+ * Clear old maps. this also does some error checking for us
+ */
+- munmap_back:
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ if (vma && vma->vm_start < addr + len) {
+ if (do_munmap(mm, addr, len))
+ return -ENOMEM;
+- goto munmap_back;
++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
++ BUG_ON(vma && vma->vm_start < addr + len);
+ }
+
+ /* Check against address space limits *after* clearing old maps... */
+- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
++ if (!may_expand_vm(mm, charged))
+ return -ENOMEM;
+
+ if (mm->map_count > sysctl_max_map_count)
+ return -ENOMEM;
+
+- if (security_vm_enough_memory(len >> PAGE_SHIFT))
++ if (security_vm_enough_memory(charged))
+ return -ENOMEM;
+
+ /* Can we just expand an old private anonymous mapping? */
+@@ -2056,10 +2444,21 @@ unsigned long do_brk(unsigned long addr,
+ */
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ if (!vma) {
+- vm_unacct_memory(len >> PAGE_SHIFT);
++ vm_unacct_memory(charged);
+ return -ENOMEM;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (flags & VM_EXEC)) {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m) {
++ kmem_cache_free(vm_area_cachep, vma);
++ vm_unacct_memory(charged);
++ return -ENOMEM;
++ }
++ }
++#endif
++
+ vma->vm_mm = mm;
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+@@ -2068,11 +2467,12 @@ unsigned long do_brk(unsigned long addr,
+ vma->vm_page_prot = vm_get_page_prot(flags);
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ out:
+- mm->total_vm += len >> PAGE_SHIFT;
++ mm->total_vm += charged;
+ if (flags & VM_LOCKED) {
+ if (!mlock_vma_pages_range(vma, addr, addr + len))
+- mm->locked_vm += (len >> PAGE_SHIFT);
++ mm->locked_vm += charged;
+ }
++ track_exec_limit(mm, addr, addr + len, flags);
+ return addr;
+ }
+
+@@ -2118,8 +2518,10 @@ void exit_mmap(struct mm_struct *mm)
+ * Walk the list again, actually closing and freeing it,
+ * with preemption enabled, without holding any MM locks.
+ */
+- while (vma)
++ while (vma) {
++ vma->vm_mirror = NULL;
+ vma = remove_vma(vma);
++ }
+
+ BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
+ }
+@@ -2133,6 +2535,10 @@ int insert_vm_struct(struct mm_struct *
+ struct vm_area_struct * __vma, * prev;
+ struct rb_node ** rb_link, * rb_parent;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++#endif
++
+ /*
+ * The vm_pgoff of a purely anonymous vma should be irrelevant
+ * until its first write fault, when page's anon_vma and index
+@@ -2155,7 +2561,22 @@ int insert_vm_struct(struct mm_struct *
+ if ((vma->vm_flags & VM_ACCOUNT) &&
+ security_vm_enough_memory_mm(mm, vma_pages(vma)))
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m)
++ return -ENOMEM;
++ }
++#endif
++
+ vma_link(mm, vma, prev, rb_link, rb_parent);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m)
++ pax_mirror_vma(vma_m, vma);
++#endif
++
+ return 0;
+ }
+
+@@ -2173,6 +2594,8 @@ struct vm_area_struct *copy_vma(struct v
+ struct rb_node **rb_link, *rb_parent;
+ struct mempolicy *pol;
+
++ BUG_ON(vma->vm_mirror);
++
+ /*
+ * If anonymous vma has not yet been faulted, update new pgoff
+ * to match new location, to increase its chance of merging.
+@@ -2216,6 +2639,35 @@ struct vm_area_struct *copy_vma(struct v
+ return new_vma;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
++{
++ struct vm_area_struct *prev_m;
++ struct rb_node **rb_link_m, *rb_parent_m;
++ struct mempolicy *pol_m;
++
++ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
++ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
++ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
++ *vma_m = *vma;
++ pol_m = vma_policy(vma_m);
++ mpol_get(pol_m);
++ vma_set_policy(vma_m, pol_m);
++ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
++ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
++ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
++ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
++ if (vma_m->vm_file)
++ get_file(vma_m->vm_file);
++ if (vma_m->vm_ops && vma_m->vm_ops->open)
++ vma_m->vm_ops->open(vma_m);
++ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
++ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
++ vma_m->vm_mirror = vma;
++ vma->vm_mirror = vma_m;
++}
++#endif
++
+ /*
+ * Return true if the calling process may expand its vm space by the passed
+ * number of pages
+@@ -2226,7 +2678,7 @@ int may_expand_vm(struct mm_struct *mm,
+ unsigned long lim;
+
+ lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+-
++ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
+ if (cur + npages > lim)
+ return 0;
+ return 1;
+@@ -2267,7 +2719,7 @@ static void special_mapping_close(struct
+ {
+ }
+
+-static struct vm_operations_struct special_mapping_vmops = {
++static const struct vm_operations_struct special_mapping_vmops = {
+ .close = special_mapping_close,
+ .fault = special_mapping_fault,
+ };
+@@ -2295,6 +2747,15 @@ int install_special_mapping(struct mm_st
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT) {
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++ else
++ vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
++ }
++#endif
++
+ vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
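The verify_mm_writelocked() helper added above asserts that the caller holds mmap_sem for writing by attempting a read trylock: if the trylock succeeds, the write lock was not held, which is treated as a bug. A rough userspace analog using POSIX rwlocks instead of the kernel's rwsem:

#include <pthread.h>
#include <stdlib.h>

static void verify_writelocked(pthread_rwlock_t *lock)
{
        if (pthread_rwlock_tryrdlock(lock) == 0) {      /* should have failed */
                pthread_rwlock_unlock(lock);
                abort();        /* caller broke the locking rule */
        }
}

int main(void)
{
        pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

        pthread_rwlock_wrlock(&lock);   /* correct usage: hold for writing */
        verify_writelocked(&lock);
        pthread_rwlock_unlock(&lock);
        return 0;
}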
+diff -urNp linux-2.6.31.7/mm/mprotect.c linux-2.6.31.7/mm/mprotect.c
+--- linux-2.6.31.7/mm/mprotect.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/mprotect.c 2009-12-08 17:39:44.309812227 -0500
+@@ -24,10 +24,16 @@
+ #include <linux/mmu_notifier.h>
+ #include <linux/migrate.h>
+ #include <linux/perf_counter.h>
++
++#ifdef CONFIG_PAX_MPROTECT
++#include <linux/elf.h>
++#endif
++
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/mmu_context.h>
+
+ #ifndef pgprot_modify
+ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+@@ -132,6 +138,48 @@ static void change_protection(struct vm_
+ flush_tlb_range(vma, start, end);
+ }
+
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++/* called while holding the mmap semaphore for writing, except for stack expansion */
++void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
++{
++ unsigned long oldlimit, newlimit = 0UL;
++
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
++ return;
++
++ spin_lock(&mm->page_table_lock);
++ oldlimit = mm->context.user_cs_limit;
++ if ((prot & VM_EXEC) && oldlimit < end)
++ /* USER_CS limit moved up */
++ newlimit = end;
++ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
++ /* USER_CS limit moved down */
++ newlimit = start;
++
++ if (newlimit) {
++ mm->context.user_cs_limit = newlimit;
++
++#ifdef CONFIG_SMP
++ wmb();
++ cpus_clear(mm->context.cpu_user_cs_mask);
++ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
++#endif
++
++ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
++ }
++ spin_unlock(&mm->page_table_lock);
++ if (newlimit == end) {
++ struct vm_area_struct *vma = find_vma(mm, oldlimit);
++
++ for (; vma && vma->vm_start < end; vma = vma->vm_next)
++ if (is_vm_hugetlb_page(vma))
++ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
++ else
++ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
++ }
++}
++#endif
++
+ int
+ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned long newflags)
+@@ -144,6 +192,14 @@ mprotect_fixup(struct vm_area_struct *vm
+ int error;
+ int dirty_accountable = 0;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++ unsigned long start_m, end_m;
++
++ start_m = start + SEGMEXEC_TASK_SIZE;
++ end_m = end + SEGMEXEC_TASK_SIZE;
++#endif
++
+ if (newflags == oldflags) {
+ *pprev = vma;
+ return 0;
+@@ -165,6 +221,38 @@ mprotect_fixup(struct vm_area_struct *vm
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
++ if (start != vma->vm_start) {
++ error = split_vma(mm, vma, start, 1);
++ if (error)
++ goto fail;
++ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
++ *pprev = (*pprev)->vm_next;
++ }
++
++ if (end != vma->vm_end) {
++ error = split_vma(mm, vma, end, 0);
++ if (error)
++ goto fail;
++ }
++
++ if (pax_find_mirror_vma(vma)) {
++ error = __do_munmap(mm, start_m, end_m - start_m);
++ if (error)
++ goto fail;
++ } else {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m) {
++ error = -ENOMEM;
++ goto fail;
++ }
++ vma->vm_flags = newflags;
++ pax_mirror_vma(vma_m, vma);
++ }
++ }
++#endif
++
+ /*
+ * First try to merge with previous and/or next vma.
+ */
+@@ -196,8 +284,14 @@ success:
+ * held in write mode.
+ */
+ vma->vm_flags = newflags;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->binfmt && current->binfmt->handle_mprotect)
++ current->binfmt->handle_mprotect(vma, newflags);
++#endif
++
+ vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
+- vm_get_page_prot(newflags));
++ vm_get_page_prot(vma->vm_flags));
+
+ if (vma_wants_writenotify(vma)) {
+ vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
+@@ -238,6 +332,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+ end = start + len;
+ if (end <= start)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ if (!arch_validate_prot(prot))
+ return -EINVAL;
+
+@@ -245,7 +350,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+ /*
+ * Does the application expect PROT_READ to imply PROT_EXEC:
+ */
+- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
+ prot |= PROT_EXEC;
+
+ vm_flags = calc_vm_prot_bits(prot);
+@@ -277,6 +382,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+ if (start > vma->vm_start)
+ prev = vma;
+
++ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
++ error = -EACCES;
++ goto out;
++ }
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->binfmt && current->binfmt->handle_mprotect)
++ current->binfmt->handle_mprotect(vma, vm_flags);
++#endif
++
+ for (nstart = start ; ; ) {
+ unsigned long newflags;
+
+@@ -301,6 +416,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+ if (error)
+ goto out;
+ perf_counter_mmap(vma);
++
++ track_exec_limit(current->mm, nstart, tmp, vm_flags);
++
+ nstart = tmp;
+
+ if (nstart < prev->vm_end)
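track_exec_limit() above keeps a single executable-limit watermark: it moves up when an executable range ends above it, and down to the range start when a range straddling the current limit loses exec. A simplified, pure-function sketch of just that arithmetic; the locking, per-CPU mask handling and segment reload in the patch are deliberately omitted, and the flag value is a placeholder:

#include <stdio.h>

#define VM_EXEC 0x4ul   /* placeholder for the kernel flag */

static unsigned long new_exec_limit(unsigned long oldlimit,
                                    unsigned long start, unsigned long end,
                                    unsigned long prot)
{
        if ((prot & VM_EXEC) && oldlimit < end)
                return end;             /* limit moves up */
        if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
                return start;           /* limit moves down */
        return oldlimit;                /* unchanged */
}

int main(void)
{
        unsigned long limit = 0x08050000UL;

        limit = new_exec_limit(limit, 0x40000000UL, 0x40001000UL, VM_EXEC);
        printf("after exec mmap:    %#lx\n", limit);
        limit = new_exec_limit(limit, 0x3fff0000UL, 0x40001000UL, 0);
        printf("after mprotect(-x): %#lx\n", limit);
        return 0;
}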
+diff -urNp linux-2.6.31.7/mm/mremap.c linux-2.6.31.7/mm/mremap.c
+--- linux-2.6.31.7/mm/mremap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/mremap.c 2009-12-08 17:39:44.310816215 -0500
+@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
+ continue;
+ pte = ptep_clear_flush(vma, old_addr, old_pte);
+ pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
++ pte = pte_exprotect(pte);
++#endif
++
+ set_pte_at(mm, new_addr, new_pte, pte);
+ }
+
+@@ -262,6 +268,7 @@ unsigned long do_mremap(unsigned long ad
+ struct vm_area_struct *vma;
+ unsigned long ret = -EINVAL;
+ unsigned long charged = 0;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
+ goto out;
+@@ -280,6 +287,15 @@ unsigned long do_mremap(unsigned long ad
+ if (!new_len)
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
++ old_len > pax_task_size || addr > pax_task_size-old_len)
++ goto out;
++
+ /* new_addr is only valid if MREMAP_FIXED is specified */
+ if (flags & MREMAP_FIXED) {
+ if (new_addr & ~PAGE_MASK)
+@@ -287,16 +303,13 @@ unsigned long do_mremap(unsigned long ad
+ if (!(flags & MREMAP_MAYMOVE))
+ goto out;
+
+- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
++ if (new_addr > pax_task_size - new_len)
+ goto out;
+
+ /* Check if the location we're moving into overlaps the
+ * old location at all, and fail if it does.
+ */
+- if ((new_addr <= addr) && (new_addr+new_len) > addr)
+- goto out;
+-
+- if ((addr <= new_addr) && (addr+old_len) > new_addr)
++ if (addr + old_len > new_addr && new_addr + new_len > addr)
+ goto out;
+
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+@@ -334,6 +347,14 @@ unsigned long do_mremap(unsigned long ad
+ ret = -EINVAL;
+ goto out;
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_find_mirror_vma(vma)) {
++ ret = -EINVAL;
++ goto out;
++ }
++#endif
++
+ /* We can't remap across vm area boundaries */
+ if (old_len > vma->vm_end - addr)
+ goto out;
+@@ -367,7 +388,7 @@ unsigned long do_mremap(unsigned long ad
+ if (old_len == vma->vm_end - addr &&
+ !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
+ (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
+- unsigned long max_addr = TASK_SIZE;
++ unsigned long max_addr = pax_task_size;
+ if (vma->vm_next)
+ max_addr = vma->vm_next->vm_start;
+ /* can we just expand the current mapping? */
+@@ -385,6 +406,7 @@ unsigned long do_mremap(unsigned long ad
+ addr + new_len);
+ }
+ ret = addr;
++ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
+ goto out;
+ }
+ }
+@@ -395,8 +417,8 @@ unsigned long do_mremap(unsigned long ad
+ */
+ ret = -ENOMEM;
+ if (flags & MREMAP_MAYMOVE) {
++ unsigned long map_flags = 0;
+ if (!(flags & MREMAP_FIXED)) {
+- unsigned long map_flags = 0;
+ if (vma->vm_flags & VM_MAYSHARE)
+ map_flags |= MAP_SHARED;
+
+@@ -411,7 +433,12 @@ unsigned long do_mremap(unsigned long ad
+ if (ret)
+ goto out;
+ }
++ map_flags = vma->vm_flags;
+ ret = move_vma(vma, addr, old_len, new_len, new_addr);
++ if (!(ret & ~PAGE_MASK)) {
++ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
++ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
++ }
+ }
+ out:
+ if (ret & ~PAGE_MASK)
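The rewritten overlap test in do_mremap() above is the standard half-open interval intersection check and is equivalent to the two-branch version it replaces. A tiny self-checking sketch, assuming non-zero lengths as the syscall already guarantees:

#include <assert.h>
#include <stdio.h>

static int overlaps(unsigned long addr, unsigned long old_len,
                    unsigned long new_addr, unsigned long new_len)
{
        /* [addr, addr+old_len) intersects [new_addr, new_addr+new_len) */
        return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
        assert(overlaps(0x1000, 0x1000, 0x1800, 0x1000));       /* partial overlap */
        assert(!overlaps(0x1000, 0x1000, 0x2000, 0x1000));      /* adjacent only   */
        assert(overlaps(0x1000, 0x3000, 0x2000, 0x0800));       /* containment     */
        puts("overlap checks ok");
        return 0;
}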
+diff -urNp linux-2.6.31.7/mm/nommu.c linux-2.6.31.7/mm/nommu.c
+--- linux-2.6.31.7/mm/nommu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/nommu.c 2009-12-08 17:39:44.310816215 -0500
+@@ -79,7 +79,7 @@ static struct kmem_cache *vm_region_jar;
+ struct rb_root nommu_region_tree = RB_ROOT;
+ DECLARE_RWSEM(nommu_region_sem);
+
+-struct vm_operations_struct generic_file_vm_ops = {
++const struct vm_operations_struct generic_file_vm_ops = {
+ };
+
+ /*
+@@ -780,15 +780,6 @@ struct vm_area_struct *find_vma(struct m
+ EXPORT_SYMBOL(find_vma);
+
+ /*
+- * find a VMA
+- * - we don't extend stack VMAs under NOMMU conditions
+- */
+-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+-{
+- return find_vma(mm, addr);
+-}
+-
+-/*
+ * expand a stack to a given address
+ * - not supported under NOMMU conditions
+ */
+diff -urNp linux-2.6.31.7/mm/page_alloc.c linux-2.6.31.7/mm/page_alloc.c
+--- linux-2.6.31.7/mm/page_alloc.c 2009-12-08 17:29:51.642741401 -0500
++++ linux-2.6.31.7/mm/page_alloc.c 2009-12-08 17:39:44.311753524 -0500
+@@ -559,6 +559,10 @@ static void __free_pages_ok(struct page
+ int bad = 0;
+ int wasMlocked = TestClearPageMlocked(page);
+
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ unsigned long index = 1UL << order;
++#endif
++
+ kmemcheck_free_shadow(page, order);
+
+ for (i = 0 ; i < (1 << order) ; ++i)
+@@ -571,6 +575,12 @@ static void __free_pages_ok(struct page
+ debug_check_no_obj_freed(page_address(page),
+ PAGE_SIZE << order);
+ }
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ for (; index; --index)
++ sanitize_highpage(page + index - 1);
++#endif
++
+ arch_free_page(page, order);
+ kernel_map_pages(page, 1 << order, 0);
+
+@@ -662,8 +672,10 @@ static int prep_new_page(struct page *pa
+ arch_alloc_page(page, order);
+ kernel_map_pages(page, 1 << order, 1);
+
++#ifndef CONFIG_PAX_MEMORY_SANITIZE
+ if (gfp_flags & __GFP_ZERO)
+ prep_zero_page(page, order, gfp_flags);
++#endif
+
+ if (order && (gfp_flags & __GFP_COMP))
+ prep_compound_page(page, order);
+@@ -1039,6 +1051,11 @@ static void free_hot_cold_page(struct pa
+ debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
+ debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
+ }
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ sanitize_highpage(page);
++#endif
++
+ arch_free_page(page, 0);
+ kernel_map_pages(page, 1, 0);
+
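The PAX_MEMORY_SANITIZE hunks above wipe pages at free time, which is also why the separate zero-on-allocate step becomes redundant for those configurations. A loose userspace analog with malloc()/free(); note that a plain memset before free can be elided by optimizing compilers, so this is illustrative only:

#include <stdlib.h>
#include <string.h>

static void sanitizing_free(void *ptr, size_t len)
{
        if (!ptr)
                return;
        memset(ptr, 0, len);    /* wipe before the memory is recycled */
        free(ptr);
}

int main(void)
{
        char *secret = malloc(64);

        if (!secret)
                return 1;
        strcpy(secret, "ephemeral key material");
        sanitizing_free(secret, 64);    /* contents gone before the chunk is reused */
        return 0;
}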
+diff -urNp linux-2.6.31.7/mm/percpu.c linux-2.6.31.7/mm/percpu.c
+--- linux-2.6.31.7/mm/percpu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/percpu.c 2009-12-08 17:39:44.312741922 -0500
+@@ -105,7 +105,7 @@ static int pcpu_nr_slots __read_mostly;
+ static size_t pcpu_chunk_struct_size __read_mostly;
+
+ /* the address of the first chunk which starts with the kernel static area */
+-void *pcpu_base_addr __read_mostly;
++void *pcpu_base_addr __read_only;
+ EXPORT_SYMBOL_GPL(pcpu_base_addr);
+
+ /*
+diff -urNp linux-2.6.31.7/mm/rmap.c linux-2.6.31.7/mm/rmap.c
+--- linux-2.6.31.7/mm/rmap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/rmap.c 2009-12-08 17:39:44.312741922 -0500
+@@ -103,6 +103,10 @@ int anon_vma_prepare(struct vm_area_stru
+ struct mm_struct *mm = vma->vm_mm;
+ struct anon_vma *allocated;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ anon_vma = find_mergeable_anon_vma(vma);
+ allocated = NULL;
+ if (!anon_vma) {
+@@ -116,6 +120,15 @@ int anon_vma_prepare(struct vm_area_stru
+ /* page_table_lock to protect against threads */
+ spin_lock(&mm->page_table_lock);
+ if (likely(!vma->anon_vma)) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ vma_m->anon_vma = anon_vma;
++ __anon_vma_link(vma_m);
++ }
++#endif
++
+ vma->anon_vma = anon_vma;
+ list_add_tail(&vma->anon_vma_node, &anon_vma->head);
+ allocated = NULL;
+diff -urNp linux-2.6.31.7/mm/shmem.c linux-2.6.31.7/mm/shmem.c
+--- linux-2.6.31.7/mm/shmem.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/shmem.c 2009-12-08 17:39:44.313712854 -0500
+@@ -31,7 +31,7 @@
+ #include <linux/swap.h>
+ #include <linux/ima.h>
+
+-static struct vfsmount *shm_mnt;
++struct vfsmount *shm_mnt;
+
+ #ifdef CONFIG_SHMEM
+ /*
+@@ -219,7 +219,7 @@ static const struct file_operations shme
+ static const struct inode_operations shmem_inode_operations;
+ static const struct inode_operations shmem_dir_inode_operations;
+ static const struct inode_operations shmem_special_inode_operations;
+-static struct vm_operations_struct shmem_vm_ops;
++static const struct vm_operations_struct shmem_vm_ops;
+
+ static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
+ .ra_pages = 0, /* No readahead */
+@@ -2497,7 +2497,7 @@ static const struct super_operations shm
+ .put_super = shmem_put_super,
+ };
+
+-static struct vm_operations_struct shmem_vm_ops = {
++static const struct vm_operations_struct shmem_vm_ops = {
+ .fault = shmem_fault,
+ #ifdef CONFIG_NUMA
+ .set_policy = shmem_set_policy,
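
The mm/shmem.c change above is part of a pattern repeated throughout the patch: operations tables such as shmem_vm_ops (and, further down, seq_operations, file_operations, sysfs_ops and super_operations instances) are declared const so the function-pointer tables land in read-only data and cannot be retargeted at runtime. A standalone illustration of the pattern (invented names, not from the patch):

    #include <stdio.h>

    struct ops {
        void (*greet)(const char *who);
    };

    static void greet_impl(const char *who)
    {
        printf("hello, %s\n", who);
    }

    /* const puts the pointer table in .rodata; stray writes fault
     * instead of silently redirecting the callback. */
    static const struct ops my_ops = {
        .greet = greet_impl,
    };

    int main(void)
    {
        my_ops.greet("world");
        /* my_ops.greet = other_impl;  would be rejected at compile time */
        return 0;
    }
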
+diff -urNp linux-2.6.31.7/mm/slab.c linux-2.6.31.7/mm/slab.c
+--- linux-2.6.31.7/mm/slab.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/slab.c 2009-12-08 17:39:44.314811970 -0500
+@@ -308,7 +308,7 @@ struct kmem_list3 {
+ * Need this for bootstrapping a per node allocator.
+ */
+ #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
+-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
++struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
+ #define CACHE_CACHE 0
+ #define SIZE_AC MAX_NUMNODES
+ #define SIZE_L3 (2 * MAX_NUMNODES)
+@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
+ * reciprocal_divide(offset, cache->reciprocal_buffer_size)
+ */
+ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+- const struct slab *slab, void *obj)
++ const struct slab *slab, const void *obj)
+ {
+ u32 offset = (obj - slab->s_mem);
+ return reciprocal_divide(offset, cache->reciprocal_buffer_size);
+@@ -584,14 +584,14 @@ struct cache_names {
+ static struct cache_names __initdata cache_names[] = {
+ #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
+ #include <linux/kmalloc_sizes.h>
+- {NULL,}
++ {NULL, NULL}
+ #undef CACHE
+ };
+
+ static struct arraycache_init initarray_cache __initdata =
+- { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
++ { {0, BOOT_CPUCACHE_ENTRIES, 1, 0}, {NULL} };
+ static struct arraycache_init initarray_generic =
+- { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
++ { {0, BOOT_CPUCACHE_ENTRIES, 1, 0}, {NULL} };
+
+ /* internal cache of cache description objs */
+ static struct kmem_cache cache_cache = {
+@@ -4473,15 +4473,64 @@ static const struct file_operations proc
+
+ static int __init slab_proc_init(void)
+ {
++#if !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
+ #ifdef CONFIG_DEBUG_SLAB_LEAK
+ proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
+ #endif
++#endif
+ return 0;
+ }
+ module_init(slab_proc_init);
+ #endif
+
++void check_object_size(const void *ptr, unsigned long n, bool to)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++ struct kmem_cache *cachep;
++ struct slab *slabp;
++ struct page *page;
++ unsigned int objnr;
++ unsigned long offset;
++
++ if (!n)
++ return;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ goto report;
++
++ if (!virt_addr_valid(ptr))
++ return;
++
++ page = virt_to_head_page(ptr);
++
++ /* XXX: can get a little tighter with this stack check */
++ if (!PageSlab(page) && object_is_on_stack(ptr) &&
++ (n > ((unsigned long)task_stack_page(current) + THREAD_SIZE -
++ (unsigned long)ptr)))
++ goto report;
++ else if (!PageSlab(page))
++ return;
++
++ cachep = page_get_cache(page);
++ slabp = page_get_slab(page);
++ objnr = obj_to_index(cachep, slabp, ptr);
++ BUG_ON(objnr >= cachep->num);
++ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
++ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
++ return;
++
++report:
++ if (to)
++ pax_report_leak_to_user(ptr, n);
++ else
++ pax_report_overflow_from_user(ptr, n);
++#endif
++
++}
++EXPORT_SYMBOL(check_object_size);
++
+ /**
+ * ksize - get the actual amount of memory allocated for a given object
+ * @objp: Pointer to the object
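
check_object_size() added above is the CONFIG_PAX_USERCOPY hook for the SLAB allocator (SLOB and SLUB get their own versions below): before a copy to or from userspace it locates the slab object containing the pointer, verifies that the copy stays inside that object (with a coarser bound for stack pointers), and otherwise reports a leak or overflow. The core bounds test can be sketched in plain C like this (illustrative only, not the kernel code):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Reject copies that would step outside the containing object. */
    static int check_copy(const char *obj, size_t obj_size,
                          const char *ptr, size_t n)
    {
        size_t offset;

        if (ptr < obj)
            return -1;
        offset = (size_t)(ptr - obj);
        if (offset > obj_size || n > obj_size - offset) {
            fprintf(stderr, "refusing %zu-byte copy at offset %zu of a "
                            "%zu-byte object\n", n, offset, obj_size);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        char buf[32];

        if (check_copy(buf, sizeof(buf), buf + 8, 16) == 0)
            memcpy(buf + 8, "fits in the object", 16);    /* allowed */

        check_copy(buf, sizeof(buf), buf + 8, 64);        /* rejected */
        return 0;
    }
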
+diff -urNp linux-2.6.31.7/mm/slob.c linux-2.6.31.7/mm/slob.c
+--- linux-2.6.31.7/mm/slob.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/slob.c 2009-12-08 17:39:44.314811970 -0500
+@@ -29,7 +29,7 @@
+ * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
+ * alloc_pages() directly, allocating compound pages so the page order
+ * does not have to be separately tracked, and also stores the exact
+- * allocation size in page->private so that it can be used to accurately
++ * allocation size in slob_page->size so that it can be used to accurately
+ * provide ksize(). These objects are detected in kfree() because slob_page()
+ * is false for them.
+ *
+@@ -58,6 +58,7 @@
+ */
+
+ #include <linux/kernel.h>
++#include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/mm.h>
+ #include <linux/swap.h> /* struct reclaim_state */
+@@ -100,7 +101,8 @@ struct slob_page {
+ unsigned long flags; /* mandatory */
+ atomic_t _count; /* mandatory */
+ slobidx_t units; /* free units left in page */
+- unsigned long pad[2];
++ unsigned long pad[1];
++ unsigned long size; /* size when >=PAGE_SIZE */
+ slob_t *free; /* first free slob_t in page */
+ struct list_head list; /* linked list of free pages */
+ };
+@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
+ */
+ static inline int is_slob_page(struct slob_page *sp)
+ {
+- return PageSlab((struct page *)sp);
++ return PageSlab((struct page *)sp) && !sp->size;
+ }
+
+ static inline void set_slob_page(struct slob_page *sp)
+@@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
+
+ static inline struct slob_page *slob_page(const void *addr)
+ {
+- return (struct slob_page *)virt_to_page(addr);
++ return (struct slob_page *)virt_to_head_page(addr);
+ }
+
+ /*
+@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
+ /*
+ * Return the size of a slob block.
+ */
+-static slobidx_t slob_units(slob_t *s)
++static slobidx_t slob_units(const slob_t *s)
+ {
+ if (s->units > 0)
+ return s->units;
+@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
+ /*
+ * Return the next free slob block pointer after this one.
+ */
+-static slob_t *slob_next(slob_t *s)
++static slob_t *slob_next(const slob_t *s)
+ {
+ slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
+ slobidx_t next;
+@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
+ /*
+ * Returns true if s is the last free block in its page.
+ */
+-static int slob_last(slob_t *s)
++static int slob_last(const slob_t *s)
+ {
+ return !((unsigned long)slob_next(s) & ~PAGE_MASK);
+ }
+@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
+ if (!page)
+ return NULL;
+
++ set_slob_page(page);
+ return page_address(page);
+ }
+
+@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
+ if (!b)
+ return NULL;
+ sp = slob_page(b);
+- set_slob_page(sp);
+
+ spin_lock_irqsave(&slob_lock, flags);
+ sp->units = SLOB_UNITS(PAGE_SIZE);
+ sp->free = b;
++ sp->size = 0;
+ INIT_LIST_HEAD(&sp->list);
+ set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
+ set_slob_page_free(sp, slob_list);
+@@ -475,10 +478,9 @@ out:
+ #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
+ #endif
+
+-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
++static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
+ {
+- unsigned int *m;
+- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
++ slob_t *m;
+ void *ret;
+
+ lockdep_trace_alloc(gfp);
+@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
+
+ if (!m)
+ return NULL;
+- *m = size;
++ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
++ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
++ m[0].units = size;
++ m[1].units = align;
+ ret = (void *)m + align;
+
+ trace_kmalloc_node(_RET_IP_, ret,
+@@ -501,9 +506,9 @@ void *__kmalloc_node(size_t size, gfp_t
+
+ ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
+ if (ret) {
+- struct page *page;
+- page = virt_to_page(ret);
+- page->private = size;
++ struct slob_page *sp;
++ sp = slob_page(ret);
++ sp->size = size;
+ }
+
+ trace_kmalloc_node(_RET_IP_, ret,
+@@ -513,6 +518,13 @@ void *__kmalloc_node(size_t size, gfp_t
+ kmemleak_alloc(ret, size, 1, gfp);
+ return ret;
+ }
++
++void *__kmalloc_node(size_t size, gfp_t gfp, int node)
++{
++ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
++
++ return __kmalloc_node_align(size, gfp, node, align);
++}
+ EXPORT_SYMBOL(__kmalloc_node);
+
+ void kfree(const void *block)
+@@ -528,13 +540,86 @@ void kfree(const void *block)
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+- unsigned int *m = (unsigned int *)(block - align);
+- slob_free(m, *m + align);
+- } else
++ slob_t *m = (slob_t *)(block - align);
++ slob_free(m, m[0].units + align);
++ } else {
++ clear_slob_page(sp);
++ free_slob_page(sp);
++ sp->size = 0;
+ put_page(&sp->page);
++ }
+ }
+ EXPORT_SYMBOL(kfree);
+
++void check_object_size(const void *ptr, unsigned long n, bool to)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++ struct slob_page *sp;
++ const slob_t *free;
++ const void *base;
++
++ if (!n)
++ return;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ goto report;
++
++ if (!virt_addr_valid(ptr))
++ return;
++
++ sp = slob_page(ptr);
++ /* XXX: can get a little tighter with this stack check */
++ if (!PageSlobPage((struct page*)sp) && object_is_on_stack(ptr) &&
++ (n > ((unsigned long)task_stack_page(current) + THREAD_SIZE -
++ (unsigned long)ptr)))
++ goto report;
++ else if (!PageSlobPage((struct page*)sp))
++ return;
++
++ if (sp->size) {
++ base = page_address(&sp->page);
++ if (base <= ptr && n <= sp->size - (ptr - base))
++ return;
++ goto report;
++ }
++
++ /* some tricky double walking to find the chunk */
++ base = (void *)((unsigned long)ptr & PAGE_MASK);
++ free = sp->free;
++
++ while (!slob_last(free) && (void *)free <= ptr) {
++ base = free + slob_units(free);
++ free = slob_next(free);
++ }
++
++ while (base < (void *)free) {
++ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
++ int size = SLOB_UNIT * SLOB_UNITS(m + align);
++ int offset;
++
++ if (ptr < base + align)
++ goto report;
++
++ offset = ptr - base - align;
++ if (offset < m) {
++ if (n <= m - offset)
++ return;
++ goto report;
++ }
++ base += size;
++ }
++
++report:
++ if (to)
++ pax_report_leak_to_user(ptr, n);
++ else
++ pax_report_overflow_from_user(ptr, n);
++#endif
++
++}
++EXPORT_SYMBOL(check_object_size);
++
+ /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
+ size_t ksize(const void *block)
+ {
+@@ -547,10 +632,10 @@ size_t ksize(const void *block)
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+- unsigned int *m = (unsigned int *)(block - align);
+- return SLOB_UNITS(*m) * SLOB_UNIT;
++ slob_t *m = (slob_t *)(block - align);
++ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
+ } else
+- return sp->page.private;
++ return sp->size;
+ }
+ EXPORT_SYMBOL(ksize);
+
+@@ -605,17 +690,25 @@ void *kmem_cache_alloc_node(struct kmem_
+ {
+ void *b;
+
++#ifdef CONFIG_PAX_USERCOPY
++ b = __kmalloc_node_align(c->size, flags, node, c->align);
++#else
+ if (c->size < PAGE_SIZE) {
+ b = slob_alloc(c->size, flags, c->align, node);
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ SLOB_UNITS(c->size) * SLOB_UNIT,
+ flags, node);
+ } else {
++ struct slob_page *sp;
++
+ b = slob_new_pages(flags, get_order(c->size), node);
++ sp = slob_page(b);
++ sp->size = c->size;
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ PAGE_SIZE << get_order(c->size),
+ flags, node);
+ }
++#endif
+
+ if (c->ctor)
+ c->ctor(b);
+@@ -627,10 +720,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
+
+ static void __kmem_cache_free(void *b, int size)
+ {
+- if (size < PAGE_SIZE)
++ struct slob_page *sp = slob_page(b);
++
++ if (is_slob_page(sp))
+ slob_free(b, size);
+- else
++ else {
++ clear_slob_page(sp);
++ free_slob_page(sp);
++ sp->size = 0;
+ slob_free_pages(b, get_order(size));
++ }
+ }
+
+ static void kmem_rcu_free(struct rcu_head *head)
+@@ -643,15 +742,24 @@ static void kmem_rcu_free(struct rcu_hea
+
+ void kmem_cache_free(struct kmem_cache *c, void *b)
+ {
++ int size = c->size;
++
++#ifdef CONFIG_PAX_USERCOPY
++ if (size + c->align < PAGE_SIZE) {
++ size += c->align;
++ b -= c->align;
++ }
++#endif
++
+ kmemleak_free_recursive(b, c->flags);
+ if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+ struct slob_rcu *slob_rcu;
+- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
++ slob_rcu = b + (size - sizeof(struct slob_rcu));
+ INIT_RCU_HEAD(&slob_rcu->head);
+- slob_rcu->size = c->size;
++ slob_rcu->size = size;
+ call_rcu(&slob_rcu->head, kmem_rcu_free);
+ } else {
+- __kmem_cache_free(b, c->size);
++ __kmem_cache_free(b, size);
+ }
+
+ trace_kmem_cache_free(_RET_IP_, b);
+diff -urNp linux-2.6.31.7/mm/slub.c linux-2.6.31.7/mm/slub.c
+--- linux-2.6.31.7/mm/slub.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/slub.c 2009-12-08 17:39:44.316652650 -0500
+@@ -1915,7 +1915,7 @@ static int slub_min_objects;
+ * Merge control. If this is set then no merging of slab caches will occur.
+ * (Could be removed. This was introduced to pacify the merge skeptics.)
+ */
+-static int slub_nomerge;
++static int slub_nomerge = 1;
+
+ /*
+ * Calculate the order of allocation given an slab object size.
+@@ -2458,7 +2458,7 @@ static int kmem_cache_open(struct kmem_c
+ * list to avoid pounding the page allocator excessively.
+ */
+ set_min_partial(s, ilog2(s->size));
+- s->refcount = 1;
++ atomic_set(&s->refcount, 1);
+ #ifdef CONFIG_NUMA
+ s->remote_node_defrag_ratio = 1000;
+ #endif
+@@ -2595,8 +2595,7 @@ static inline int kmem_cache_close(struc
+ void kmem_cache_destroy(struct kmem_cache *s)
+ {
+ down_write(&slub_lock);
+- s->refcount--;
+- if (!s->refcount) {
++ if (atomic_dec_and_test(&s->refcount)) {
+ list_del(&s->list);
+ up_write(&slub_lock);
+ if (kmem_cache_close(s)) {
+@@ -2875,6 +2874,48 @@ void *__kmalloc_node(size_t size, gfp_t
+ EXPORT_SYMBOL(__kmalloc_node);
+ #endif
+
++void check_object_size(const void *ptr, unsigned long n, bool to)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++ struct page *page;
++ struct kmem_cache *s;
++ unsigned long offset;
++
++ if (!n)
++ return;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ goto report;
++
++ if (!virt_addr_valid(ptr))
++ return;
++
++ page = get_object_page(ptr);
++
++ /* XXX: can get a little tighter with this stack check */
++ if (!page && object_is_on_stack(ptr) &&
++ (n > ((unsigned long)task_stack_page(current) + THREAD_SIZE -
++ (unsigned long)ptr)))
++ goto report;
++ else if (!page)
++ return;
++
++ s = page->slab;
++ offset = (ptr - page_address(page)) % s->size;
++ if (offset <= s->objsize && n <= s->objsize - offset)
++ return;
++
++report:
++ if (to)
++ pax_report_leak_to_user(ptr, n);
++ else
++ pax_report_overflow_from_user(ptr, n);
++#endif
++
++}
++EXPORT_SYMBOL(check_object_size);
++
+ size_t ksize(const void *object)
+ {
+ struct page *page;
+@@ -3146,7 +3187,7 @@ void __init kmem_cache_init(void)
+ */
+ create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
+ sizeof(struct kmem_cache_node), GFP_NOWAIT);
+- kmalloc_caches[0].refcount = -1;
++ atomic_set(&kmalloc_caches[0].refcount, -1);
+ caches++;
+
+ hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
+@@ -3240,7 +3281,7 @@ static int slab_unmergeable(struct kmem_
+ /*
+ * We may have set a slab to be unmergeable during bootstrap.
+ */
+- if (s->refcount < 0)
++ if (atomic_read(&s->refcount) < 0)
+ return 1;
+
+ return 0;
+@@ -3297,7 +3338,7 @@ struct kmem_cache *kmem_cache_create(con
+ if (s) {
+ int cpu;
+
+- s->refcount++;
++ atomic_inc(&s->refcount);
+ /*
+ * Adjust the object sizes so that we clear
+ * the complete object on kzalloc.
+@@ -3316,7 +3357,7 @@ struct kmem_cache *kmem_cache_create(con
+
+ if (sysfs_slab_alias(s, name)) {
+ down_write(&slub_lock);
+- s->refcount--;
++ atomic_dec(&s->refcount);
+ up_write(&slub_lock);
+ goto err;
+ }
+@@ -4045,7 +4086,7 @@ SLAB_ATTR_RO(ctor);
+
+ static ssize_t aliases_show(struct kmem_cache *s, char *buf)
+ {
+- return sprintf(buf, "%d\n", s->refcount - 1);
++ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
+ }
+ SLAB_ATTR_RO(aliases);
+
+@@ -4447,7 +4488,7 @@ static void kmem_cache_release(struct ko
+ kfree(s);
+ }
+
+-static struct sysfs_ops slab_sysfs_ops = {
++static const struct sysfs_ops slab_sysfs_ops = {
+ .show = slab_attr_show,
+ .store = slab_attr_store,
+ };
+@@ -4466,7 +4507,7 @@ static int uevent_filter(struct kset *ks
+ return 0;
+ }
+
+-static struct kset_uevent_ops slab_uevent_ops = {
++static const struct kset_uevent_ops slab_uevent_ops = {
+ .filter = uevent_filter,
+ };
+
+@@ -4726,7 +4767,9 @@ static const struct file_operations proc
+
+ static int __init slab_proc_init(void)
+ {
++#if !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
++#endif
+ return 0;
+ }
+ module_init(slab_proc_init);
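
The SLUB hunks above turn the kmem_cache reference count into an atomic_t manipulated with atomic_inc(), atomic_dec(), atomic_dec_and_test() and atomic_read(), closing the window where concurrent updates to s->refcount could be lost; the /proc/slabinfo entry is also hidden when GRKERNSEC_PROC_ADD is enabled. The atomic_dec_and_test() pattern, shown with C11 atomics as a self-contained sketch (invented names):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cache {
        atomic_int refcount;
        const char *name;
    };

    static struct cache *cache_create(const char *name)
    {
        struct cache *c = malloc(sizeof(*c));

        if (!c)
            return NULL;
        atomic_init(&c->refcount, 1);
        c->name = name;
        return c;
    }

    static void cache_get(struct cache *c)
    {
        atomic_fetch_add(&c->refcount, 1);
    }

    static void cache_put(struct cache *c)
    {
        /* Counterpart of atomic_dec_and_test(): the last put frees. */
        if (atomic_fetch_sub(&c->refcount, 1) == 1) {
            printf("destroying %s\n", c->name);
            free(c);
        }
    }

    int main(void)
    {
        struct cache *c = cache_create("demo");

        if (!c)
            return 1;
        cache_get(c);   /* second user takes a reference */
        cache_put(c);   /* ...and drops it */
        cache_put(c);   /* last reference: cache destroyed */
        return 0;
    }
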
+diff -urNp linux-2.6.31.7/mm/util.c linux-2.6.31.7/mm/util.c
+--- linux-2.6.31.7/mm/util.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/util.c 2009-12-08 17:39:44.316652650 -0500
+@@ -224,6 +224,12 @@ EXPORT_SYMBOL(strndup_user);
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ }
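
The mm/util.c hunk adds the PAX_RANDMMAP offset to the legacy mmap layout: when the task carries MF_PAX_RANDMMAP, its mmap base becomes TASK_UNMAPPED_BASE plus the per-process random, page-aligned delta_mmap set up elsewhere in the patch. The arithmetic, with made-up constants and a stand-in entropy source (nothing here is the kernel's code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define PAGE_SHIFT          12
    #define TASK_UNMAPPED_BASE  0x40000000UL   /* illustrative value */
    #define RAND_BITS           16             /* illustrative entropy */

    int main(void)
    {
        unsigned long delta, mmap_base;

        /* A real implementation draws from the kernel RNG, not rand(). */
        srand((unsigned)time(NULL));
        delta = ((unsigned long)rand() % (1UL << RAND_BITS)) << PAGE_SHIFT;
        mmap_base = TASK_UNMAPPED_BASE + delta;

        printf("delta_mmap = %#lx\n", delta);
        printf("mmap_base  = %#lx\n", mmap_base);
        return 0;
    }
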
+diff -urNp linux-2.6.31.7/mm/vmalloc.c linux-2.6.31.7/mm/vmalloc.c
+--- linux-2.6.31.7/mm/vmalloc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/mm/vmalloc.c 2009-12-08 17:39:44.316652650 -0500
+@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
+- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
++ BUG_ON(!pte_exec(*pte));
++ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
++ continue;
++ }
++#endif
++
++ {
++ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
++ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
++ }
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ }
+
+@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
+ unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+ {
+ pte_t *pte;
++ int ret = -ENOMEM;
+
+ /*
+ * nr is a running index into the array which helps higher level
+@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ return -ENOMEM;
++
++ pax_open_kernel();
+ do {
+ struct page *page = pages[*nr];
+
+- if (WARN_ON(!pte_none(*pte)))
+- return -EBUSY;
+- if (WARN_ON(!page))
+- return -ENOMEM;
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (pgprot_val(prot) & _PAGE_NX)
++#endif
++
++ if (WARN_ON(!pte_none(*pte))) {
++ ret = -EBUSY;
++ goto out;
++ }
++ if (WARN_ON(!page)) {
++ ret = -ENOMEM;
++ goto out;
++ }
+ set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
+ (*nr)++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+- return 0;
++ ret = 0;
++out:
++ pax_close_kernel();
++ return ret;
+ }
+
+ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+@@ -193,11 +218,20 @@ static inline int is_vmalloc_or_module_a
+ * and fall back on vmalloc() if that fails. Others
+ * just put it in the vmalloc space.
+ */
+-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
++#ifdef CONFIG_MODULES
++#ifdef MODULES_VADDR
+ unsigned long addr = (unsigned long)x;
+ if (addr >= MODULES_VADDR && addr < MODULES_END)
+ return 1;
+ #endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
++ return 1;
++#endif
++
++#endif
++
+ return is_vmalloc_addr(x);
+ }
+
+@@ -292,13 +326,13 @@ static void __insert_vmap_area(struct vm
+ struct rb_node *tmp;
+
+ while (*p) {
+- struct vmap_area *tmp;
++ struct vmap_area *varea;
+
+ parent = *p;
+- tmp = rb_entry(parent, struct vmap_area, rb_node);
+- if (va->va_start < tmp->va_end)
++ varea = rb_entry(parent, struct vmap_area, rb_node);
++ if (va->va_start < varea->va_end)
+ p = &(*p)->rb_left;
+- else if (va->va_end > tmp->va_start)
++ else if (va->va_end > varea->va_start)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+@@ -1132,6 +1166,16 @@ static struct vm_struct *__get_vm_area_n
+ unsigned long align = 1;
+
+ BUG_ON(in_interrupt());
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (flags & VM_KERNEXEC) {
++ if (start != VMALLOC_START || end != VMALLOC_END)
++ return NULL;
++ start = (unsigned long)&MODULES_EXEC_VADDR;
++ end = (unsigned long)&MODULES_EXEC_END;
++ }
++#endif
++
+ if (flags & VM_IOREMAP) {
+ int bit = fls(size);
+
+@@ -1371,6 +1415,11 @@ void *vmap(struct page **pages, unsigned
+ if (count > num_physpages)
+ return NULL;
+
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (!(pgprot_val(prot) & _PAGE_NX))
++ flags |= VM_KERNEXEC;
++#endif
++
+ area = get_vm_area_caller((count << PAGE_SHIFT), flags,
+ __builtin_return_address(0));
+ if (!area)
+@@ -1478,6 +1527,13 @@ static void *__vmalloc_node(unsigned lon
+ if (!size || (size >> PAGE_SHIFT) > num_physpages)
+ return NULL;
+
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (!(pgprot_val(prot) & _PAGE_NX))
++ area = __get_vm_area_node(size, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
++ node, gfp_mask, caller);
++ else
++#endif
++
+ area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
+ node, gfp_mask, caller);
+
+@@ -1496,6 +1552,7 @@ static void *__vmalloc_node(unsigned lon
+ return addr;
+ }
+
++#undef __vmalloc
+ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+ {
+ return __vmalloc_node(size, gfp_mask, prot, -1,
+@@ -1512,6 +1569,7 @@ EXPORT_SYMBOL(__vmalloc);
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
++#undef vmalloc
+ void *vmalloc(unsigned long size)
+ {
+ return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+@@ -1526,6 +1584,7 @@ EXPORT_SYMBOL(vmalloc);
+ * The resulting memory area is zeroed so it can be mapped to userspace
+ * without leaking data.
+ */
++#undef vmalloc_user
+ void *vmalloc_user(unsigned long size)
+ {
+ struct vm_struct *area;
+@@ -1552,6 +1611,7 @@ EXPORT_SYMBOL(vmalloc_user);
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
++#undef vmalloc_node
+ void *vmalloc_node(unsigned long size, int node)
+ {
+ return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+@@ -1574,10 +1634,10 @@ EXPORT_SYMBOL(vmalloc_node);
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+-
++#undef vmalloc_exec
+ void *vmalloc_exec(unsigned long size)
+ {
+- return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
++ return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
+ -1, __builtin_return_address(0));
+ }
+
+@@ -1596,6 +1656,7 @@ void *vmalloc_exec(unsigned long size)
+ * Allocate enough 32bit PA addressable pages to cover @size from the
+ * page level allocator and map them into contiguous kernel virtual space.
+ */
++#undef vmalloc_32
+ void *vmalloc_32(unsigned long size)
+ {
+ return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+@@ -1610,6 +1671,7 @@ EXPORT_SYMBOL(vmalloc_32);
+ * The resulting memory area is 32bit addressable and zeroed so it can be
+ * mapped to userspace without leaking data.
+ */
++#undef vmalloc_32_user
+ void *vmalloc_32_user(unsigned long size)
+ {
+ struct vm_struct *area;
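
The mm/vmalloc.c changes are the KERNEXEC side of the patch (x86-32 with CONFIG_PAX_KERNEXEC): allocations whose protection lacks _PAGE_NX get the VM_KERNEXEC flag and are carved out of the MODULES_EXEC_VADDR..MODULES_EXEC_END window rather than the general vmalloc range, the unmap path treats module pages specially, and vmalloc_exec() now asks for zeroed pages. The underlying idea, choosing a mapping's protections and placement by whether it must be executable, can be sketched with plain mmap() (a userspace analogue only, not the patch's code):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    /* Writable mappings never get PROT_EXEC; executable ones are
     * requested explicitly and could be placed in a reserved window. */
    static void *region_alloc(size_t len, int executable)
    {
        int prot = PROT_READ | (executable ? PROT_EXEC : PROT_WRITE);
        void *p = mmap(NULL, len, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        return p == MAP_FAILED ? NULL : p;
    }

    int main(void)
    {
        void *data = region_alloc(4096, 0);
        void *code = region_alloc(4096, 1);

        if (data)
            memset(data, 0, 4096);      /* writable, non-executable */
        printf("data=%p code=%p\n", data, code);
        return 0;
    }
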
+diff -urNp linux-2.6.31.7/net/atm/atm_misc.c linux-2.6.31.7/net/atm/atm_misc.c
+--- linux-2.6.31.7/net/atm/atm_misc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/atm/atm_misc.c 2009-12-08 17:39:44.316652650 -0500
+@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
+ if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
+ return 1;
+ atm_return(vcc,truesize);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return 0;
+ }
+
+@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
+ }
+ }
+ atm_return(vcc,guess);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return NULL;
+ }
+
+@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
+
+ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
+ {
+-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
+ __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ }
+@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
+
+ void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
+ {
+-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
+ __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ }
+diff -urNp linux-2.6.31.7/net/atm/proc.c linux-2.6.31.7/net/atm/proc.c
+--- linux-2.6.31.7/net/atm/proc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/atm/proc.c 2009-12-08 17:39:44.317812984 -0500
+@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
+ const struct k_atm_aal_stats *stats)
+ {
+ seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
+- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
+- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
+- atomic_read(&stats->rx_drop));
++ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
++ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
++ atomic_read_unchecked(&stats->rx_drop));
+ }
+
+ static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
+diff -urNp linux-2.6.31.7/net/atm/resources.c linux-2.6.31.7/net/atm/resources.c
+--- linux-2.6.31.7/net/atm/resources.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/atm/resources.c 2009-12-08 17:39:44.317812984 -0500
+@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
+ static void copy_aal_stats(struct k_atm_aal_stats *from,
+ struct atm_aal_stats *to)
+ {
+-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ }
+@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
+ static void subtract_aal_stats(struct k_atm_aal_stats *from,
+ struct atm_aal_stats *to)
+ {
+-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ }
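
In the net/atm files above the per-device statistics move to the *_unchecked atomic helpers introduced earlier in the patch; these are intended for pure counters where wrap-around is acceptable and so are exempt from the overflow checking applied to ordinary atomic_t. The bulk conversions are generated by redefining __HANDLE_ITEM around the __AAL_STAT_ITEMS/__SONET_ITEMS lists, an X-macro technique that can be shown in isolation (the field and macro names below are invented):

    #include <stdio.h>

    /* One list of counters, expanded several ways via __HANDLE_ITEM. */
    #define STAT_ITEMS        \
        __HANDLE_ITEM(tx);    \
        __HANDLE_ITEM(rx);    \
        __HANDLE_ITEM(rx_drop)

    struct k_stats { long tx, rx, rx_drop; };  /* live counters */
    struct u_stats { long tx, rx, rx_drop; };  /* exported copy */

    static void copy_stats(const struct k_stats *from, struct u_stats *to)
    {
    #define __HANDLE_ITEM(i) to->i = from->i
        STAT_ITEMS;
    #undef __HANDLE_ITEM
    }

    static void subtract_stats(struct k_stats *from, const struct u_stats *to)
    {
    #define __HANDLE_ITEM(i) from->i -= to->i
        STAT_ITEMS;
    #undef __HANDLE_ITEM
    }

    int main(void)
    {
        struct k_stats k = { 5, 7, 1 };
        struct u_stats snap;

        copy_stats(&k, &snap);
        subtract_stats(&k, &snap);
        printf("tx=%ld rx=%ld drop=%ld\n", k.tx, k.rx, k.rx_drop);
        return 0;
    }
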
+diff -urNp linux-2.6.31.7/net/bridge/br_private.h linux-2.6.31.7/net/bridge/br_private.h
+--- linux-2.6.31.7/net/bridge/br_private.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/bridge/br_private.h 2009-12-08 17:39:44.318663527 -0500
+@@ -250,7 +250,7 @@ extern void br_ifinfo_notify(int event,
+
+ #ifdef CONFIG_SYSFS
+ /* br_sysfs_if.c */
+-extern struct sysfs_ops brport_sysfs_ops;
++extern const struct sysfs_ops brport_sysfs_ops;
+ extern int br_sysfs_addif(struct net_bridge_port *p);
+
+ /* br_sysfs_br.c */
+diff -urNp linux-2.6.31.7/net/bridge/br_stp_if.c linux-2.6.31.7/net/bridge/br_stp_if.c
+--- linux-2.6.31.7/net/bridge/br_stp_if.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/bridge/br_stp_if.c 2009-12-08 17:39:44.318663527 -0500
+@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
+ char *envp[] = { NULL };
+
+ if (br->stp_enabled == BR_USER_STP) {
+- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
++ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
+ printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
+ br->dev->name, r);
+
+diff -urNp linux-2.6.31.7/net/bridge/br_sysfs_if.c linux-2.6.31.7/net/bridge/br_sysfs_if.c
+--- linux-2.6.31.7/net/bridge/br_sysfs_if.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/bridge/br_sysfs_if.c 2009-12-08 17:39:44.318663527 -0500
+@@ -203,7 +203,7 @@ static ssize_t brport_store(struct kobje
+ return ret;
+ }
+
+-struct sysfs_ops brport_sysfs_ops = {
++const struct sysfs_ops brport_sysfs_ops = {
+ .show = brport_show,
+ .store = brport_store,
+ };
+diff -urNp linux-2.6.31.7/net/core/flow.c linux-2.6.31.7/net/core/flow.c
+--- linux-2.6.31.7/net/core/flow.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/core/flow.c 2009-12-08 17:39:44.318663527 -0500
+@@ -39,7 +39,7 @@ atomic_t flow_cache_genid = ATOMIC_INIT(
+
+ static u32 flow_hash_shift;
+ #define flow_hash_size (1 << flow_hash_shift)
+-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
++static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
+
+ #define flow_table(cpu) (per_cpu(flow_tables, cpu))
+
+@@ -52,7 +52,7 @@ struct flow_percpu_info {
+ u32 hash_rnd;
+ int count;
+ };
+-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
++static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
+
+ #define flow_hash_rnd_recalc(cpu) \
+ (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
+@@ -69,7 +69,7 @@ struct flow_flush_info {
+ atomic_t cpuleft;
+ struct completion completion;
+ };
+-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
++static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
+
+ #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
+
+diff -urNp linux-2.6.31.7/net/dccp/ccids/ccid3.c linux-2.6.31.7/net/dccp/ccids/ccid3.c
+--- linux-2.6.31.7/net/dccp/ccids/ccid3.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/dccp/ccids/ccid3.c 2009-12-08 17:39:44.318663527 -0500
+@@ -43,7 +43,7 @@
+ static int ccid3_debug;
+ #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a)
+ #else
+-#define ccid3_pr_debug(format, a...)
++#define ccid3_pr_debug(format, a...) do {} while (0)
+ #endif
+
+ /*
+diff -urNp linux-2.6.31.7/net/dccp/dccp.h linux-2.6.31.7/net/dccp/dccp.h
+--- linux-2.6.31.7/net/dccp/dccp.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/dccp/dccp.h 2009-12-08 17:39:44.319813462 -0500
+@@ -44,9 +44,9 @@ extern int dccp_debug;
+ #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a)
+ #define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
+ #else
+-#define dccp_pr_debug(format, a...)
+-#define dccp_pr_debug_cat(format, a...)
+-#define dccp_debug(format, a...)
++#define dccp_pr_debug(format, a...) do {} while (0)
++#define dccp_pr_debug_cat(format, a...) do {} while (0)
++#define dccp_debug(format, a...) do {} while (0)
+ #endif
+
+ extern struct inet_hashinfo dccp_hashinfo;
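
The ccid3.c and dccp.h hunks replace the empty debug macros with do {} while (0) bodies so that a compiled-out dccp_pr_debug() still expands to a complete statement: with a truly empty expansion the call degenerates to a bare semicolon, which trips empty-body warnings inside if/else and stops behaving like an ordinary statement, while the no-op loop preserves the usual shape. A small example of the convention (the pr_debug name here is illustrative):

    #include <stdio.h>

    #define DEBUG 0

    #if DEBUG
    #define pr_debug(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
    #else
    /* do {} while (0) keeps the disabled call a single, semicolon-
     * terminated statement instead of expanding to nothing. */
    #define pr_debug(fmt, ...) do {} while (0)
    #endif

    int main(void)
    {
        int ready = 1;

        if (ready)
            pr_debug("ready\n");   /* same shape whether DEBUG is 0 or 1 */
        else
            puts("not ready");

        return 0;
    }
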
+diff -urNp linux-2.6.31.7/net/ipv4/inet_hashtables.c linux-2.6.31.7/net/ipv4/inet_hashtables.c
+--- linux-2.6.31.7/net/ipv4/inet_hashtables.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/ipv4/inet_hashtables.c 2009-12-08 17:39:44.319813462 -0500
+@@ -18,11 +18,14 @@
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/wait.h>
++#include <linux/security.h>
+
+ #include <net/inet_connection_sock.h>
+ #include <net/inet_hashtables.h>
+ #include <net/ip.h>
+
++extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
++
+ /*
+ * Allocate and initialize a new local port bind bucket.
+ * The bindhash mutex for snum's hash chain must be held here.
+@@ -490,6 +493,8 @@ ok:
+ }
+ spin_unlock(&head->lock);
+
++ gr_update_task_in_ip_table(current, inet_sk(sk));
++
+ if (tw) {
+ inet_twsk_deschedule(tw, death_row);
+ inet_twsk_put(tw);
+diff -urNp linux-2.6.31.7/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.31.7/net/ipv4/netfilter/nf_nat_snmp_basic.c
+--- linux-2.6.31.7/net/ipv4/netfilter/nf_nat_snmp_basic.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/ipv4/netfilter/nf_nat_snmp_basic.c 2009-12-08 17:39:49.661916185 -0500
+@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
+
+ *len = 0;
+
+- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
++ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
+ if (*octets == NULL) {
+ if (net_ratelimit())
+ printk("OOM in bsalg (%d)\n", __LINE__);
+diff -urNp linux-2.6.31.7/net/ipv4/tcp_ipv4.c linux-2.6.31.7/net/ipv4/tcp_ipv4.c
+--- linux-2.6.31.7/net/ipv4/tcp_ipv4.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/ipv4/tcp_ipv4.c 2009-12-08 17:39:49.662748477 -0500
+@@ -1504,6 +1504,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
+ return 0;
+
+ reset:
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!skb->dev || (skb->dev->flags & IFF_LOOPBACK))
++#endif
+ tcp_v4_send_reset(rsk, skb);
+ discard:
+ kfree_skb(skb);
+@@ -1612,6 +1615,9 @@ no_tcp_socket:
+ bad_packet:
+ TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+ } else {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (skb->dev->flags & IFF_LOOPBACK)
++#endif
+ tcp_v4_send_reset(NULL, skb);
+ }
+
+diff -urNp linux-2.6.31.7/net/ipv4/tcp_minisocks.c linux-2.6.31.7/net/ipv4/tcp_minisocks.c
+--- linux-2.6.31.7/net/ipv4/tcp_minisocks.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/ipv4/tcp_minisocks.c 2009-12-08 17:39:49.662748477 -0500
+@@ -695,8 +695,11 @@ listen_overflow:
+
+ embryonic_reset:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
++
++#ifndef CONFIG_GRKERNSEC_BLACKHOLE
+ if (!(flg & TCP_FLAG_RST))
+ req->rsk_ops->send_reset(sk, skb);
++#endif
+
+ inet_csk_reqsk_queue_drop(sk, req, prev);
+ return NULL;
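
The GRKERNSEC_BLACKHOLE hunks here and in tcp_ipv4.c (and in the UDP code below) suppress the TCP resets and ICMP port-unreachable messages normally sent for packets that match no listener: in the main receive paths the reply only goes out if the packet arrived over loopback, and the embryonic reset is dropped entirely, so remote port scans see silence rather than explicit refusals. The resulting policy boils down to roughly this predicate (userspace sketch, names invented):

    #include <stdbool.h>
    #include <stdio.h>

    struct pkt_info {
        bool from_loopback;   /* the IFF_LOOPBACK test in the patch */
    };

    /* Only answer probes of closed ports when they came over loopback. */
    static bool should_send_reset(const struct pkt_info *pkt, bool blackhole)
    {
        if (!blackhole)
            return true;
        return pkt->from_loopback;
    }

    int main(void)
    {
        struct pkt_info external = { .from_loopback = false };
        struct pkt_info local    = { .from_loopback = true  };

        printf("external probe: %s\n",
               should_send_reset(&external, true) ? "send RST" : "drop silently");
        printf("local probe:    %s\n",
               should_send_reset(&local, true) ? "send RST" : "drop silently");
        return 0;
    }
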
+diff -urNp linux-2.6.31.7/net/ipv4/udp.c linux-2.6.31.7/net/ipv4/udp.c
+--- linux-2.6.31.7/net/ipv4/udp.c 2009-12-08 17:29:51.644746217 -0500
++++ linux-2.6.31.7/net/ipv4/udp.c 2009-12-08 17:39:49.663905073 -0500
+@@ -86,6 +86,7 @@
+ #include <linux/types.h>
+ #include <linux/fcntl.h>
+ #include <linux/module.h>
++#include <linux/security.h>
+ #include <linux/socket.h>
+ #include <linux/sockios.h>
+ #include <linux/igmp.h>
+@@ -369,6 +370,9 @@ found:
+ return s;
+ }
+
++extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
++
+ /*
+ * This routine is called by the ICMP module when it gets some
+ * sort of error condition. If err < 0 then the socket should
+@@ -631,9 +635,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
+ dport = usin->sin_port;
+ if (dport == 0)
+ return -EINVAL;
++
++ err = gr_search_udp_sendmsg(sk, usin);
++ if (err)
++ return err;
+ } else {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -EDESTADDRREQ;
++
++ err = gr_search_udp_sendmsg(sk, NULL);
++ if (err)
++ return err;
++
+ daddr = inet->daddr;
+ dport = inet->dport;
+ /* Open fast path for connected socket.
+@@ -934,6 +947,10 @@ try_again:
+ if (!skb)
+ goto out;
+
++ err = gr_search_udp_recvmsg(sk, skb);
++ if (err)
++ goto out_free;
++
+ ulen = skb->len - sizeof(struct udphdr);
+ copied = len;
+ if (copied > ulen)
+@@ -1322,6 +1339,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
+ goto csum_error;
+
+ UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (skb->dev->flags & IFF_LOOPBACK)
++#endif
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+ /*
+diff -urNp linux-2.6.31.7/net/ipv6/exthdrs.c linux-2.6.31.7/net/ipv6/exthdrs.c
+--- linux-2.6.31.7/net/ipv6/exthdrs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/ipv6/exthdrs.c 2009-12-08 17:39:49.663905073 -0500
+@@ -630,7 +630,7 @@ static struct tlvtype_proc tlvprochopopt
+ .type = IPV6_TLV_JUMBO,
+ .func = ipv6_hop_jumbo,
+ },
+- { -1, }
++ { -1, NULL }
+ };
+
+ int ipv6_parse_hopopts(struct sk_buff *skb)
+diff -urNp linux-2.6.31.7/net/ipv6/ip6mr.c linux-2.6.31.7/net/ipv6/ip6mr.c
+--- linux-2.6.31.7/net/ipv6/ip6mr.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/ipv6/ip6mr.c 2009-12-08 17:39:49.663905073 -0500
+@@ -204,7 +204,7 @@ static int ip6mr_vif_seq_show(struct seq
+ return 0;
+ }
+
+-static struct seq_operations ip6mr_vif_seq_ops = {
++static const struct seq_operations ip6mr_vif_seq_ops = {
+ .start = ip6mr_vif_seq_start,
+ .next = ip6mr_vif_seq_next,
+ .stop = ip6mr_vif_seq_stop,
+@@ -217,7 +217,7 @@ static int ip6mr_vif_open(struct inode *
+ sizeof(struct ipmr_vif_iter));
+ }
+
+-static struct file_operations ip6mr_vif_fops = {
++static const struct file_operations ip6mr_vif_fops = {
+ .owner = THIS_MODULE,
+ .open = ip6mr_vif_open,
+ .read = seq_read,
+@@ -328,7 +328,7 @@ static int ipmr_mfc_seq_show(struct seq_
+ return 0;
+ }
+
+-static struct seq_operations ipmr_mfc_seq_ops = {
++static const struct seq_operations ipmr_mfc_seq_ops = {
+ .start = ipmr_mfc_seq_start,
+ .next = ipmr_mfc_seq_next,
+ .stop = ipmr_mfc_seq_stop,
+@@ -341,7 +341,7 @@ static int ipmr_mfc_open(struct inode *i
+ sizeof(struct ipmr_mfc_iter));
+ }
+
+-static struct file_operations ip6mr_mfc_fops = {
++static const struct file_operations ip6mr_mfc_fops = {
+ .owner = THIS_MODULE,
+ .open = ipmr_mfc_open,
+ .read = seq_read,
+diff -urNp linux-2.6.31.7/net/ipv6/raw.c linux-2.6.31.7/net/ipv6/raw.c
+--- linux-2.6.31.7/net/ipv6/raw.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/ipv6/raw.c 2009-12-08 17:39:49.664906218 -0500
+@@ -600,7 +600,7 @@ out:
+ return err;
+ }
+
+-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
++static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
+ struct flowi *fl, struct rt6_info *rt,
+ unsigned int flags)
+ {
+diff -urNp linux-2.6.31.7/net/ipv6/tcp_ipv6.c linux-2.6.31.7/net/ipv6/tcp_ipv6.c
+--- linux-2.6.31.7/net/ipv6/tcp_ipv6.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/ipv6/tcp_ipv6.c 2009-12-08 17:39:49.664906218 -0500
+@@ -1577,6 +1577,9 @@ static int tcp_v6_do_rcv(struct sock *sk
+ return 0;
+
+ reset:
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!skb->dev || (skb->dev->flags & IFF_LOOPBACK))
++#endif
+ tcp_v6_send_reset(sk, skb);
+ discard:
+ if (opt_skb)
+@@ -1699,6 +1702,9 @@ no_tcp_socket:
+ bad_packet:
+ TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+ } else {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (skb->dev->flags & IFF_LOOPBACK)
++#endif
+ tcp_v6_send_reset(NULL, skb);
+ }
+
+diff -urNp linux-2.6.31.7/net/ipv6/udp.c linux-2.6.31.7/net/ipv6/udp.c
+--- linux-2.6.31.7/net/ipv6/udp.c 2009-12-08 17:29:51.645652587 -0500
++++ linux-2.6.31.7/net/ipv6/udp.c 2009-12-08 17:39:49.665908964 -0500
+@@ -587,6 +587,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
+ UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
+ proto == IPPROTO_UDPLITE);
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (skb->dev->flags & IFF_LOOPBACK)
++#endif
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
+
+ kfree_skb(skb);
+diff -urNp linux-2.6.31.7/net/irda/ircomm/ircomm_tty.c linux-2.6.31.7/net/irda/ircomm/ircomm_tty.c
+--- linux-2.6.31.7/net/irda/ircomm/ircomm_tty.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/irda/ircomm/ircomm_tty.c 2009-12-08 17:39:49.665908964 -0500
+@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
+ add_wait_queue(&self->open_wait, &wait);
+
+ IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, self->open_count );
++ __FILE__,__LINE__, tty->driver->name, atomic_read(&self->open_count) );
+
+ /* As far as I can see, we protect open_count - Jean II */
+ spin_lock_irqsave(&self->spinlock, flags);
+ if (!tty_hung_up_p(filp)) {
+ extra_count = 1;
+- self->open_count--;
++ atomic_dec(&self->open_count);
+ }
+ spin_unlock_irqrestore(&self->spinlock, flags);
+- self->blocked_open++;
++ atomic_inc(&self->blocked_open);
+
+ while (1) {
+ if (tty->termios->c_cflag & CBAUD) {
+@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
+ }
+
+ IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, self->open_count );
++ __FILE__,__LINE__, tty->driver->name, atomic_read(&self->open_count) );
+
+ schedule();
+ }
+@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
+ if (extra_count) {
+ /* ++ is not atomic, so this should be protected - Jean II */
+ spin_lock_irqsave(&self->spinlock, flags);
+- self->open_count++;
++ atomic_inc(&self->open_count);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+- self->blocked_open--;
++ atomic_dec(&self->blocked_open);
+
+ IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, self->open_count);
++ __FILE__,__LINE__, tty->driver->name, atomic_read(&self->open_count));
+
+ if (!retval)
+ self->flags |= ASYNC_NORMAL_ACTIVE;
+@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
+ }
+ /* ++ is not atomic, so this should be protected - Jean II */
+ spin_lock_irqsave(&self->spinlock, flags);
+- self->open_count++;
++ atomic_inc(&self->open_count);
+
+ tty->driver_data = self;
+ self->tty = tty;
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
+- self->line, self->open_count);
++ self->line, atomic_read(&self->open_count));
+
+ /* Not really used by us, but lets do it anyway */
+ self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
+ return;
+ }
+
+- if ((tty->count == 1) && (self->open_count != 1)) {
++ if ((tty->count == 1) && (atomic_read(&self->open_count) != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. state->count should always
+@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
+ */
+ IRDA_DEBUG(0, "%s(), bad serial port count; "
+ "tty->count is 1, state->count is %d\n", __func__ ,
+- self->open_count);
+- self->open_count = 1;
++ atomic_read(&self->open_count));
++ atomic_set(&self->open_count, 1);
+ }
+
+- if (--self->open_count < 0) {
++ if (atomic_dec_return(&self->open_count) < 0) {
+ IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
+- __func__, self->line, self->open_count);
+- self->open_count = 0;
++ __func__, self->line, atomic_read(&self->open_count));
++ atomic_set(&self->open_count, 0);
+ }
+- if (self->open_count) {
++ if (atomic_read(&self->open_count)) {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
+@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
+ tty->closing = 0;
+ self->tty = NULL;
+
+- if (self->blocked_open) {
++ if (atomic_read(&self->blocked_open)) {
+ if (self->close_delay)
+ schedule_timeout_interruptible(self->close_delay);
+ wake_up_interruptible(&self->open_wait);
+@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
+ spin_lock_irqsave(&self->spinlock, flags);
+ self->flags &= ~ASYNC_NORMAL_ACTIVE;
+ self->tty = NULL;
+- self->open_count = 0;
++ atomic_set(&self->open_count, 0);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ wake_up_interruptible(&self->open_wait);
+@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
+ seq_putc(m, '\n');
+
+ seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
+- seq_printf(m, "Open count: %d\n", self->open_count);
++ seq_printf(m, "Open count: %d\n", atomic_read(&self->open_count));
+ seq_printf(m, "Max data size: %d\n", self->max_data_size);
+ seq_printf(m, "Max header size: %d\n", self->max_header_size);
+
+diff -urNp linux-2.6.31.7/net/key/af_key.c linux-2.6.31.7/net/key/af_key.c
+--- linux-2.6.31.7/net/key/af_key.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/key/af_key.c 2009-12-08 17:39:49.666906349 -0500
+@@ -3705,7 +3705,7 @@ static void pfkey_seq_stop(struct seq_fi
+ read_unlock(&pfkey_table_lock);
+ }
+
+-static struct seq_operations pfkey_seq_ops = {
++static const struct seq_operations pfkey_seq_ops = {
+ .start = pfkey_seq_start,
+ .next = pfkey_seq_next,
+ .stop = pfkey_seq_stop,
+@@ -3718,7 +3718,7 @@ static int pfkey_seq_open(struct inode *
+ sizeof(struct seq_net_private));
+ }
+
+-static struct file_operations pfkey_proc_ops = {
++static const struct file_operations pfkey_proc_ops = {
+ .open = pfkey_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.31.7/net/mac80211/ieee80211_i.h linux-2.6.31.7/net/mac80211/ieee80211_i.h
+--- linux-2.6.31.7/net/mac80211/ieee80211_i.h 2009-12-08 17:29:51.646747532 -0500
++++ linux-2.6.31.7/net/mac80211/ieee80211_i.h 2009-12-08 17:39:49.667795628 -0500
+@@ -609,7 +609,7 @@ struct ieee80211_local {
+ spinlock_t queue_stop_reason_lock;
+
+ struct net_device *mdev; /* wmaster# - "master" 802.11 device */
+- int open_count;
++ atomic_t open_count;
+ int monitors, cooked_mntrs;
+ /* number of interfaces with corresponding FIF_ flags */
+ int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss;
+diff -urNp linux-2.6.31.7/net/mac80211/iface.c linux-2.6.31.7/net/mac80211/iface.c
+--- linux-2.6.31.7/net/mac80211/iface.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/mac80211/iface.c 2009-12-08 17:39:49.667795628 -0500
+@@ -164,7 +164,7 @@ static int ieee80211_open(struct net_dev
+ break;
+ }
+
+- if (local->open_count == 0) {
++ if (atomic_read(&local->open_count) == 0) {
+ res = drv_start(local);
+ if (res)
+ goto err_del_bss;
+@@ -198,7 +198,7 @@ static int ieee80211_open(struct net_dev
+ * Validate the MAC address for this device.
+ */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+- if (!local->open_count)
++ if (!atomic_read(&local->open_count))
+ drv_stop(local);
+ return -EADDRNOTAVAIL;
+ }
+@@ -281,7 +281,7 @@ static int ieee80211_open(struct net_dev
+ }
+ }
+
+- if (local->open_count == 0) {
++ if (atomic_read(&local->open_count) == 0) {
+ res = dev_open(local->mdev);
+ WARN_ON(res);
+ if (res)
+@@ -303,7 +303,7 @@ static int ieee80211_open(struct net_dev
+
+ hw_reconf_flags |= __ieee80211_recalc_idle(local);
+
+- local->open_count++;
++ atomic_inc(&local->open_count);
+ if (hw_reconf_flags) {
+ ieee80211_hw_config(local, hw_reconf_flags);
+ /*
+@@ -331,7 +331,7 @@ static int ieee80211_open(struct net_dev
+ err_del_interface:
+ drv_remove_interface(local, &conf);
+ err_stop:
+- if (!local->open_count)
++ if (!atomic_read(&local->open_count))
+ drv_stop(local);
+ err_del_bss:
+ sdata->bss = NULL;
+@@ -429,7 +429,7 @@ static int ieee80211_stop(struct net_dev
+ WARN_ON(!list_empty(&sdata->u.ap.vlans));
+ }
+
+- local->open_count--;
++ atomic_dec(&local->open_count);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+@@ -554,7 +554,7 @@ static int ieee80211_stop(struct net_dev
+
+ ieee80211_recalc_ps(local, -1);
+
+- if (local->open_count == 0) {
++ if (atomic_read(&local->open_count) == 0) {
+ if (netif_running(local->mdev))
+ dev_close(local->mdev);
+
+diff -urNp linux-2.6.31.7/net/mac80211/main.c linux-2.6.31.7/net/mac80211/main.c
+--- linux-2.6.31.7/net/mac80211/main.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/mac80211/main.c 2009-12-08 17:39:49.668664792 -0500
+@@ -193,7 +193,7 @@ int ieee80211_hw_config(struct ieee80211
+ local->hw.conf.power_level = power;
+ }
+
+- if (changed && local->open_count) {
++ if (changed && atomic_read(&local->open_count)) {
+ ret = drv_config(local, changed);
+ /*
+ * Goal:
+diff -urNp linux-2.6.31.7/net/mac80211/pm.c linux-2.6.31.7/net/mac80211/pm.c
+--- linux-2.6.31.7/net/mac80211/pm.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/mac80211/pm.c 2009-12-08 17:39:49.668664792 -0500
+@@ -103,7 +103,7 @@ int __ieee80211_suspend(struct ieee80211
+ }
+
+ /* stop hardware - this must stop RX */
+- if (local->open_count) {
++ if (atomic_read(&local->open_count)) {
+ ieee80211_led_radio(local, false);
+ drv_stop(local);
+ }
+diff -urNp linux-2.6.31.7/net/mac80211/rate.c linux-2.6.31.7/net/mac80211/rate.c
+--- linux-2.6.31.7/net/mac80211/rate.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/mac80211/rate.c 2009-12-08 17:39:49.668664792 -0500
+@@ -258,7 +258,7 @@ int ieee80211_init_rate_ctrl_alg(struct
+ struct rate_control_ref *ref, *old;
+
+ ASSERT_RTNL();
+- if (local->open_count || netif_running(local->mdev))
++ if (atomic_read(&local->open_count) || netif_running(local->mdev))
+ return -EBUSY;
+
+ ref = rate_control_alloc(name, local);
+diff -urNp linux-2.6.31.7/net/mac80211/rc80211_minstrel_debugfs.c linux-2.6.31.7/net/mac80211/rc80211_minstrel_debugfs.c
+--- linux-2.6.31.7/net/mac80211/rc80211_minstrel_debugfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/mac80211/rc80211_minstrel_debugfs.c 2009-12-08 17:39:49.668664792 -0500
+@@ -139,7 +139,7 @@ minstrel_stats_release(struct inode *ino
+ return 0;
+ }
+
+-static struct file_operations minstrel_stat_fops = {
++static const struct file_operations minstrel_stat_fops = {
+ .owner = THIS_MODULE,
+ .open = minstrel_stats_open,
+ .read = minstrel_stats_read,
+diff -urNp linux-2.6.31.7/net/mac80211/rc80211_pid_debugfs.c linux-2.6.31.7/net/mac80211/rc80211_pid_debugfs.c
+--- linux-2.6.31.7/net/mac80211/rc80211_pid_debugfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/mac80211/rc80211_pid_debugfs.c 2009-12-08 17:39:49.668664792 -0500
+@@ -198,7 +198,7 @@ static ssize_t rate_control_pid_events_r
+
+ #undef RC_PID_PRINT_BUF_SIZE
+
+-static struct file_operations rc_pid_fop_events = {
++static const struct file_operations rc_pid_fop_events = {
+ .owner = THIS_MODULE,
+ .read = rate_control_pid_events_read,
+ .poll = rate_control_pid_events_poll,
+diff -urNp linux-2.6.31.7/net/mac80211/util.c linux-2.6.31.7/net/mac80211/util.c
+--- linux-2.6.31.7/net/mac80211/util.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/mac80211/util.c 2009-12-08 17:39:49.669914966 -0500
+@@ -991,7 +991,7 @@ int ieee80211_reconfig(struct ieee80211_
+ local->suspended = false;
+
+ /* restart hardware */
+- if (local->open_count) {
++ if (atomic_read(&local->open_count)) {
+ res = drv_start(local);
+
+ ieee80211_led_radio(local, true);
+diff -urNp linux-2.6.31.7/net/packet/af_packet.c linux-2.6.31.7/net/packet/af_packet.c
+--- linux-2.6.31.7/net/packet/af_packet.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/packet/af_packet.c 2009-12-08 17:39:49.669914966 -0500
+@@ -2086,7 +2086,7 @@ static void packet_mm_close(struct vm_ar
+ atomic_dec(&pkt_sk(sk)->mapped);
+ }
+
+-static struct vm_operations_struct packet_mmap_ops = {
++static const struct vm_operations_struct packet_mmap_ops = {
+ .open = packet_mm_open,
+ .close =packet_mm_close,
+ };
+diff -urNp linux-2.6.31.7/net/sctp/socket.c linux-2.6.31.7/net/sctp/socket.c
+--- linux-2.6.31.7/net/sctp/socket.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/sctp/socket.c 2009-12-08 17:39:49.671694409 -0500
+@@ -1471,7 +1471,7 @@ SCTP_STATIC int sctp_sendmsg(struct kioc
+ struct sctp_sndrcvinfo *sinfo;
+ struct sctp_initmsg *sinit;
+ sctp_assoc_t associd = 0;
+- sctp_cmsgs_t cmsgs = { NULL };
++ sctp_cmsgs_t cmsgs = { NULL, NULL };
+ int err;
+ sctp_scope_t scope;
+ long timeo;
+@@ -5790,7 +5790,6 @@ pp_found:
+ */
+ int reuse = sk->sk_reuse;
+ struct sock *sk2;
+- struct hlist_node *node;
+
+ SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
+ if (pp->fastreuse && sk->sk_reuse &&
+diff -urNp linux-2.6.31.7/net/socket.c linux-2.6.31.7/net/socket.c
+--- linux-2.6.31.7/net/socket.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/socket.c 2009-12-08 17:39:49.672693668 -0500
+@@ -86,6 +86,7 @@
+ #include <linux/audit.h>
+ #include <linux/wireless.h>
+ #include <linux/nsproxy.h>
++#include <linux/in.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+@@ -96,6 +97,21 @@
+ #include <net/sock.h>
+ #include <linux/netfilter.h>
+
++extern void gr_attach_curr_ip(const struct sock *sk);
++extern int gr_handle_sock_all(const int family, const int type,
++ const int protocol);
++extern int gr_handle_sock_server(const struct sockaddr *sck);
++extern int gr_handle_sock_server_other(const struct socket *sck);
++extern int gr_handle_sock_client(const struct sockaddr *sck);
++extern int gr_search_connect(struct socket * sock,
++ struct sockaddr_in * addr);
++extern int gr_search_bind(struct socket * sock,
++ struct sockaddr_in * addr);
++extern int gr_search_listen(struct socket * sock);
++extern int gr_search_accept(struct socket * sock);
++extern int gr_search_socket(const int domain, const int type,
++ const int protocol);
++
+ static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
+ static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
+@@ -285,7 +301,7 @@ static int init_inodecache(void)
+ return 0;
+ }
+
+-static struct super_operations sockfs_ops = {
++static const struct super_operations sockfs_ops = {
+ .alloc_inode = sock_alloc_inode,
+ .destroy_inode =sock_destroy_inode,
+ .statfs = simple_statfs,
+@@ -299,7 +315,7 @@ static int sockfs_get_sb(struct file_sys
+ mnt);
+ }
+
+-static struct vfsmount *sock_mnt __read_mostly;
++struct vfsmount *sock_mnt __read_mostly;
+
+ static struct file_system_type sock_fs_type = {
+ .name = "sockfs",
+@@ -1283,6 +1299,16 @@ SYSCALL_DEFINE3(socket, int, family, int
+ if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+ flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+
++ if(!gr_search_socket(family, type, protocol)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_handle_sock_all(family, type, protocol)) {
++ retval = -EACCES;
++ goto out;
++ }
++
+ retval = sock_create(family, type, protocol, &sock);
+ if (retval < 0)
+ goto out;
+@@ -1415,6 +1441,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
+ if (sock) {
+ err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
+ if (err >= 0) {
++ if (gr_handle_sock_server((struct sockaddr *)&address)) {
++ err = -EACCES;
++ goto error;
++ }
++ err = gr_search_bind(sock, (struct sockaddr_in *)&address);
++ if (err)
++ goto error;
++
+ err = security_socket_bind(sock,
+ (struct sockaddr *)&address,
+ addrlen);
+@@ -1423,6 +1457,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
+ (struct sockaddr *)
+ &address, addrlen);
+ }
++error:
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+@@ -1446,10 +1481,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
+ if ((unsigned)backlog > somaxconn)
+ backlog = somaxconn;
+
++ if (gr_handle_sock_server_other(sock)) {
++ err = -EPERM;
++ goto error;
++ }
++
++ err = gr_search_listen(sock);
++ if (err)
++ goto error;
++
+ err = security_socket_listen(sock, backlog);
+ if (!err)
+ err = sock->ops->listen(sock, backlog);
+
++error:
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+@@ -1492,6 +1537,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
+ newsock->type = sock->type;
+ newsock->ops = sock->ops;
+
++ if (gr_handle_sock_server_other(sock)) {
++ err = -EPERM;
++ sock_release(newsock);
++ goto out_put;
++ }
++
++ err = gr_search_accept(sock);
++ if (err) {
++ sock_release(newsock);
++ goto out_put;
++ }
++
+ /*
+ * We don't need try_module_get here, as the listening socket (sock)
+ * has the protocol module (sock->ops->owner) held.
+@@ -1534,6 +1591,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
+ fd_install(newfd, newfile);
+ err = newfd;
+
++ gr_attach_curr_ip(newsock->sk);
++
+ out_put:
+ fput_light(sock->file, fput_needed);
+ out:
+@@ -1571,6 +1630,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
+ int, addrlen)
+ {
+ struct socket *sock;
++ struct sockaddr *sck;
+ struct sockaddr_storage address;
+ int err, fput_needed;
+
+@@ -1581,6 +1641,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
+ if (err < 0)
+ goto out_put;
+
++ sck = (struct sockaddr *)&address;
++
++ if (gr_handle_sock_client(sck)) {
++ err = -EACCES;
++ goto out_put;
++ }
++
++ err = gr_search_connect(sock, (struct sockaddr_in *)sck);
++ if (err)
++ goto out_put;
++
+ err =
+ security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
+ if (err)
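
The net/socket.c hunks thread grsecurity's socket policy hooks through the generic syscalls: socket() is vetted by gr_search_socket() and gr_handle_sock_all(), bind() and listen() by the server-side checks, connect() by the client-side ones, and accept() records the peer address via gr_attach_curr_ip(); any failing check aborts the syscall with -EACCES or -EPERM before the protocol code runs. The overall shape, a chain of pre-operation checks where the first refusal wins, looks roughly like this (all names below are invented):

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct sock_request {
        int family, type, protocol;
    };

    typedef int (*sock_hook_t)(const struct sock_request *req);

    static int deny_raw_sockets(const struct sock_request *req)
    {
        return req->type == 3 /* SOCK_RAW */ ? -EACCES : 0;
    }

    static int allow_everything(const struct sock_request *req)
    {
        (void)req;
        return 0;
    }

    /* Hooks run in order before the real operation; first error wins. */
    static const sock_hook_t create_hooks[] = {
        allow_everything,
        deny_raw_sockets,
    };

    static int checked_socket(const struct sock_request *req)
    {
        size_t i;

        for (i = 0; i < sizeof(create_hooks) / sizeof(create_hooks[0]); i++) {
            int err = create_hooks[i](req);

            if (err)
                return err;        /* the syscall would return this */
        }
        /* ...the real socket creation would happen here... */
        return 0;
    }

    int main(void)
    {
        struct sock_request tcp = { 2, 1, 0 };  /* AF_INET, SOCK_STREAM */
        struct sock_request raw = { 2, 3, 0 };  /* AF_INET, SOCK_RAW    */

        printf("stream socket: %d\n", checked_socket(&tcp));
        printf("raw socket:    %d\n", checked_socket(&raw));
        return 0;
    }
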
+diff -urNp linux-2.6.31.7/net/sunrpc/rpc_pipe.c linux-2.6.31.7/net/sunrpc/rpc_pipe.c
+--- linux-2.6.31.7/net/sunrpc/rpc_pipe.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/sunrpc/rpc_pipe.c 2009-12-08 17:39:49.672693668 -0500
+@@ -858,7 +858,7 @@ EXPORT_SYMBOL_GPL(rpc_unlink);
+ /*
+ * populate the filesystem
+ */
+-static struct super_operations s_ops = {
++static const struct super_operations s_ops = {
+ .alloc_inode = rpc_alloc_inode,
+ .destroy_inode = rpc_destroy_inode,
+ .statfs = simple_statfs,
+diff -urNp linux-2.6.31.7/net/unix/af_unix.c linux-2.6.31.7/net/unix/af_unix.c
+--- linux-2.6.31.7/net/unix/af_unix.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/unix/af_unix.c 2009-12-08 17:39:49.673909026 -0500
+@@ -734,6 +734,12 @@ static struct sock *unix_find_other(stru
+ err = -ECONNREFUSED;
+ if (!S_ISSOCK(inode->i_mode))
+ goto put_fail;
++
++ if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
++ err = -EACCES;
++ goto put_fail;
++ }
++
+ u = unix_find_socket_byinode(net, inode);
+ if (!u)
+ goto put_fail;
+@@ -754,6 +760,13 @@ static struct sock *unix_find_other(stru
+ if (u) {
+ struct dentry *dentry;
+ dentry = unix_sk(u)->dentry;
++
++ if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
++ err = -EPERM;
++ sock_put(u);
++ goto fail;
++ }
++
+ if (dentry)
+ touch_atime(unix_sk(u)->mnt, dentry);
+ } else
+@@ -839,11 +852,18 @@ static int unix_bind(struct socket *sock
+ err = security_path_mknod(&nd.path, dentry, mode, 0);
+ if (err)
+ goto out_mknod_drop_write;
++ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
++ err = -EACCES;
++ goto out_mknod_drop_write;
++ }
+ err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
+ out_mknod_drop_write:
+ mnt_drop_write(nd.path.mnt);
+ if (err)
+ goto out_mknod_dput;
++
++ gr_handle_create(dentry, nd.path.mnt);
++
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ dput(nd.path.dentry);
+ nd.path.dentry = dentry;
+@@ -861,6 +881,10 @@ out_mknod_drop_write:
+ goto out_unlock;
+ }
+
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ sk->sk_peercred.pid = current->pid;
++#endif
++
+ list = &unix_socket_table[addr->hash];
+ } else {
+ list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
+diff -urNp linux-2.6.31.7/net/xfrm/xfrm_proc.c linux-2.6.31.7/net/xfrm/xfrm_proc.c
+--- linux-2.6.31.7/net/xfrm/xfrm_proc.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/net/xfrm/xfrm_proc.c 2009-12-08 17:39:49.673909026 -0500
+@@ -60,7 +60,7 @@ static int xfrm_statistics_seq_open(stru
+ return single_open_net(inode, file, xfrm_statistics_seq_show);
+ }
+
+-static struct file_operations xfrm_statistics_seq_fops = {
++static const struct file_operations xfrm_statistics_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = xfrm_statistics_seq_open,
+ .read = seq_read,
+diff -urNp linux-2.6.31.7/samples/kobject/kset-example.c linux-2.6.31.7/samples/kobject/kset-example.c
+--- linux-2.6.31.7/samples/kobject/kset-example.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/samples/kobject/kset-example.c 2009-12-08 17:39:49.673909026 -0500
+@@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
+ }
+
+ /* Our custom sysfs_ops that we will associate with our ktype later on */
+-static struct sysfs_ops foo_sysfs_ops = {
++static const struct sysfs_ops foo_sysfs_ops = {
+ .show = foo_attr_show,
+ .store = foo_attr_store,
+ };
+diff -urNp linux-2.6.31.7/samples/markers/marker-example.c linux-2.6.31.7/samples/markers/marker-example.c
+--- linux-2.6.31.7/samples/markers/marker-example.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/samples/markers/marker-example.c 2009-12-08 17:39:49.674888977 -0500
+@@ -26,7 +26,7 @@ static int my_open(struct inode *inode,
+ return -EPERM;
+ }
+
+-static struct file_operations mark_ops = {
++static const struct file_operations mark_ops = {
+ .open = my_open,
+ };
+
+diff -urNp linux-2.6.31.7/samples/tracepoints/tracepoint-sample.c linux-2.6.31.7/samples/tracepoints/tracepoint-sample.c
+--- linux-2.6.31.7/samples/tracepoints/tracepoint-sample.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/samples/tracepoints/tracepoint-sample.c 2009-12-08 17:39:49.674888977 -0500
+@@ -28,7 +28,7 @@ static int my_open(struct inode *inode,
+ return -EPERM;
+ }
+
+-static struct file_operations mark_ops = {
++static const struct file_operations mark_ops = {
+ .open = my_open,
+ };
+
+diff -urNp linux-2.6.31.7/scripts/basic/fixdep.c linux-2.6.31.7/scripts/basic/fixdep.c
+--- linux-2.6.31.7/scripts/basic/fixdep.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/scripts/basic/fixdep.c 2009-12-08 17:39:49.674888977 -0500
+@@ -224,9 +224,9 @@ void use_config(char *m, int slen)
+
+ void parse_config_file(char *map, size_t len)
+ {
+- int *end = (int *) (map + len);
++ unsigned int *end = (unsigned int *) (map + len);
+ /* start at +1, so that p can never be < map */
+- int *m = (int *) map + 1;
++ unsigned int *m = (unsigned int *) map + 1;
+ char *p, *q;
+
+ for (; m < end; m++) {
+@@ -373,7 +373,7 @@ void print_deps(void)
+ void traps(void)
+ {
+ static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
+- int *p = (int *)test;
++ unsigned int *p = (unsigned int *)test;
+
+ if (*p != INT_CONF) {
+ fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
+diff -urNp linux-2.6.31.7/scripts/kallsyms.c linux-2.6.31.7/scripts/kallsyms.c
+--- linux-2.6.31.7/scripts/kallsyms.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/scripts/kallsyms.c 2009-12-08 17:39:49.675663294 -0500
+@@ -43,10 +43,10 @@ struct text_range {
+
+ static unsigned long long _text;
+ static struct text_range text_ranges[] = {
+- { "_stext", "_etext" },
+- { "_sinittext", "_einittext" },
+- { "_stext_l1", "_etext_l1" }, /* Blackfin on-chip L1 inst SRAM */
+- { "_stext_l2", "_etext_l2" }, /* Blackfin on-chip L2 SRAM */
++ { "_stext", "_etext", 0, 0 },
++ { "_sinittext", "_einittext", 0, 0 },
++ { "_stext_l1", "_etext_l1", 0, 0 }, /* Blackfin on-chip L1 inst SRAM */
++ { "_stext_l2", "_etext_l2", 0, 0 }, /* Blackfin on-chip L2 SRAM */
+ };
+ #define text_range_text (&text_ranges[0])
+ #define text_range_inittext (&text_ranges[1])
+diff -urNp linux-2.6.31.7/scripts/kconfig/lkc.h linux-2.6.31.7/scripts/kconfig/lkc.h
+--- linux-2.6.31.7/scripts/kconfig/lkc.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/scripts/kconfig/lkc.h 2009-12-08 17:39:49.675663294 -0500
+@@ -97,7 +97,7 @@ void menu_add_expr(enum prop_type type,
+ void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep);
+ void menu_add_option(int token, char *arg);
+ void menu_finalize(struct menu *parent);
+-void menu_set_type(int type);
++void menu_set_type(unsigned int type);
+
+ /* util.c */
+ struct file *file_lookup(const char *name);
+diff -urNp linux-2.6.31.7/scripts/kconfig/mconf.c linux-2.6.31.7/scripts/kconfig/mconf.c
+--- linux-2.6.31.7/scripts/kconfig/mconf.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/scripts/kconfig/mconf.c 2009-12-08 17:39:49.675663294 -0500
+@@ -361,7 +361,7 @@ static char filename[PATH_MAX+1];
+ static void set_config_filename(const char *config_filename)
+ {
+ static char menu_backtitle[PATH_MAX+128];
+- int size;
++ unsigned int size;
+ struct symbol *sym;
+
+ sym = sym_lookup("KERNELVERSION", 0);
+diff -urNp linux-2.6.31.7/scripts/kconfig/menu.c linux-2.6.31.7/scripts/kconfig/menu.c
+--- linux-2.6.31.7/scripts/kconfig/menu.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/scripts/kconfig/menu.c 2009-12-08 17:39:49.676692238 -0500
+@@ -104,7 +104,7 @@ void menu_add_dep(struct expr *dep)
+ current_entry->dep = expr_alloc_and(current_entry->dep, menu_check_dep(dep));
+ }
+
+-void menu_set_type(int type)
++void menu_set_type(unsigned int type)
+ {
+ struct symbol *sym = current_entry->sym;
+
+diff -urNp linux-2.6.31.7/scripts/mod/file2alias.c linux-2.6.31.7/scripts/mod/file2alias.c
+--- linux-2.6.31.7/scripts/mod/file2alias.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/scripts/mod/file2alias.c 2009-12-08 17:39:49.676692238 -0500
+@@ -72,7 +72,7 @@ static void device_id_check(const char *
+ unsigned long size, unsigned long id_size,
+ void *symval)
+ {
+- int i;
++ unsigned int i;
+
+ if (size % id_size || size < id_size) {
+ if (cross_build != 0)
+@@ -102,7 +102,7 @@ static void device_id_check(const char *
+ /* USB is special because the bcdDevice can be matched against a numeric range */
+ /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
+ static void do_usb_entry(struct usb_device_id *id,
+- unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
++ unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
+ unsigned char range_lo, unsigned char range_hi,
+ struct module *mod)
+ {
+@@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
+ for (i = 0; i < count; i++) {
+ const char *id = (char *)devs[i].id;
+ char acpi_id[sizeof(devs[0].id)];
+- int j;
++ unsigned int j;
+
+ buf_printf(&mod->dev_table_buf,
+ "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
+@@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
+
+ for (j = 0; j < PNP_MAX_DEVICES; j++) {
+ const char *id = (char *)card->devs[j].id;
+- int i2, j2;
++ unsigned int i2, j2;
+ int dup = 0;
+
+ if (!id[0])
+@@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
+ /* add an individual alias for every device entry */
+ if (!dup) {
+ char acpi_id[sizeof(card->devs[0].id)];
+- int k;
++ unsigned int k;
+
+ buf_printf(&mod->dev_table_buf,
+ "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
+@@ -690,7 +690,7 @@ static void dmi_ascii_filter(char *d, co
+ static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
+ char *alias)
+ {
+- int i, j;
++ unsigned int i, j;
+
+ sprintf(alias, "dmi*");
+
+diff -urNp linux-2.6.31.7/scripts/mod/modpost.c linux-2.6.31.7/scripts/mod/modpost.c
+--- linux-2.6.31.7/scripts/mod/modpost.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/scripts/mod/modpost.c 2009-12-08 17:39:49.677782662 -0500
+@@ -835,6 +835,7 @@ enum mismatch {
+ INIT_TO_EXIT,
+ EXIT_TO_INIT,
+ EXPORT_TO_INIT_EXIT,
++ DATA_TO_TEXT
+ };
+
+ struct sectioncheck {
+@@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
+ .fromsec = { "__ksymtab*", NULL },
+ .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
+ .mismatch = EXPORT_TO_INIT_EXIT
++},
++/* Do not reference code from writable data */
++{
++ .fromsec = { DATA_SECTIONS, NULL },
++ .tosec = { TEXT_SECTIONS, NULL },
++ .mismatch = DATA_TO_TEXT
+ }
+ };
+
+@@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
+ continue;
+ if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
+ continue;
+- if (sym->st_value == addr)
+- return sym;
+ /* Find a symbol nearby - addr are maybe negative */
+ d = sym->st_value - addr;
++ if (d == 0)
++ return sym;
+ if (d < 0)
+ d = addr - sym->st_value;
+ if (d < distance) {
+@@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
+ "Fix this by removing the %sannotation of %s "
+ "or drop the export.\n",
+ tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
++ case DATA_TO_TEXT:
++/*
++ fprintf(stderr,
++ "The variable %s references\n"
++ "the %s %s%s%s\n",
++ fromsym, to, sec2annotation(tosec), tosym, to_p);
++*/
++ break;
+ case NO_MISMATCH:
+ /* To get warnings on missing members */
+ break;
+@@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
+ va_end(ap);
+ }
+
+-void buf_write(struct buffer *buf, const char *s, int len)
++void buf_write(struct buffer *buf, const char *s, unsigned int len)
+ {
+ if (buf->size - buf->pos < len) {
+ buf->size += len + SZ;
+@@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
+ if (fstat(fileno(file), &st) < 0)
+ goto close_write;
+
+- if (st.st_size != b->pos)
++ if (st.st_size != (off_t)b->pos)
+ goto close_write;
+
+ tmp = NOFAIL(malloc(b->pos));
+diff -urNp linux-2.6.31.7/scripts/mod/modpost.h linux-2.6.31.7/scripts/mod/modpost.h
+--- linux-2.6.31.7/scripts/mod/modpost.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/scripts/mod/modpost.h 2009-12-08 17:39:49.677782662 -0500
+@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
+
+ struct buffer {
+ char *p;
+- int pos;
+- int size;
++ unsigned int pos;
++ unsigned int size;
+ };
+
+ void __attribute__((format(printf, 2, 3)))
+ buf_printf(struct buffer *buf, const char *fmt, ...);
+
+ void
+-buf_write(struct buffer *buf, const char *s, int len);
++buf_write(struct buffer *buf, const char *s, unsigned int len);
+
+ struct module {
+ struct module *next;
+diff -urNp linux-2.6.31.7/scripts/mod/sumversion.c linux-2.6.31.7/scripts/mod/sumversion.c
+--- linux-2.6.31.7/scripts/mod/sumversion.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/scripts/mod/sumversion.c 2009-12-08 17:39:49.677782662 -0500
+@@ -457,7 +457,7 @@ static void write_version(const char *fi
+ goto out;
+ }
+
+- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
++ if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
+ warn("writing sum in %s failed: %s\n",
+ filename, strerror(errno));
+ goto out;
+diff -urNp linux-2.6.31.7/scripts/pnmtologo.c linux-2.6.31.7/scripts/pnmtologo.c
+--- linux-2.6.31.7/scripts/pnmtologo.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/scripts/pnmtologo.c 2009-12-08 17:39:49.677782662 -0500
+@@ -237,14 +237,14 @@ static void write_header(void)
+ fprintf(out, " * Linux logo %s\n", logoname);
+ fputs(" */\n\n", out);
+ fputs("#include <linux/linux_logo.h>\n\n", out);
+- fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
++ fprintf(out, "static unsigned char %s_data[] = {\n",
+ logoname);
+ }
+
+ static void write_footer(void)
+ {
+ fputs("\n};\n\n", out);
+- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
++ fprintf(out, "const struct linux_logo %s = {\n", logoname);
+ fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
+ fprintf(out, "\t.width\t\t= %d,\n", logo_width);
+ fprintf(out, "\t.height\t\t= %d,\n", logo_height);
+@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
+ fputs("\n};\n\n", out);
+
+ /* write logo clut */
+- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
++ fprintf(out, "static unsigned char %s_clut[] = {\n",
+ logoname);
+ write_hex_cnt = 0;
+ for (i = 0; i < logo_clutsize; i++) {
+diff -urNp linux-2.6.31.7/security/commoncap.c linux-2.6.31.7/security/commoncap.c
+--- linux-2.6.31.7/security/commoncap.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/security/commoncap.c 2009-12-08 17:39:49.678817106 -0500
+@@ -27,7 +27,7 @@
+ #include <linux/sched.h>
+ #include <linux/prctl.h>
+ #include <linux/securebits.h>
+-
++#include <net/sock.h>
+ /*
+ * If a non-root user executes a setuid-root binary in
+ * !secure(SECURE_NOROOT) mode, then we raise capabilities.
+@@ -50,9 +50,11 @@ static void warn_setuid_and_fcaps_mixed(
+ }
+ }
+
++extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
++
+ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
+ {
+- NETLINK_CB(skb).eff_cap = current_cap();
++ NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
+ return 0;
+ }
+
+diff -urNp linux-2.6.31.7/security/integrity/ima/ima_fs.c linux-2.6.31.7/security/integrity/ima/ima_fs.c
+--- linux-2.6.31.7/security/integrity/ima/ima_fs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/security/integrity/ima/ima_fs.c 2009-12-08 17:39:49.678817106 -0500
+@@ -43,7 +43,7 @@ static ssize_t ima_show_htable_violation
+ return ima_show_htable_value(buf, count, ppos, &ima_htable.violations);
+ }
+
+-static struct file_operations ima_htable_violations_ops = {
++static const struct file_operations ima_htable_violations_ops = {
+ .read = ima_show_htable_violations
+ };
+
+@@ -55,7 +55,7 @@ static ssize_t ima_show_measurements_cou
+
+ }
+
+-static struct file_operations ima_measurements_count_ops = {
++static const struct file_operations ima_measurements_count_ops = {
+ .read = ima_show_measurements_count
+ };
+
+@@ -146,7 +146,7 @@ static int ima_measurements_show(struct
+ return 0;
+ }
+
+-static struct seq_operations ima_measurments_seqops = {
++static const struct seq_operations ima_measurments_seqops = {
+ .start = ima_measurements_start,
+ .next = ima_measurements_next,
+ .stop = ima_measurements_stop,
+@@ -158,7 +158,7 @@ static int ima_measurements_open(struct
+ return seq_open(file, &ima_measurments_seqops);
+ }
+
+-static struct file_operations ima_measurements_ops = {
++static const struct file_operations ima_measurements_ops = {
+ .open = ima_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+@@ -221,7 +221,7 @@ static int ima_ascii_measurements_show(s
+ return 0;
+ }
+
+-static struct seq_operations ima_ascii_measurements_seqops = {
++static const struct seq_operations ima_ascii_measurements_seqops = {
+ .start = ima_measurements_start,
+ .next = ima_measurements_next,
+ .stop = ima_measurements_stop,
+@@ -233,7 +233,7 @@ static int ima_ascii_measurements_open(s
+ return seq_open(file, &ima_ascii_measurements_seqops);
+ }
+
+-static struct file_operations ima_ascii_measurements_ops = {
++static const struct file_operations ima_ascii_measurements_ops = {
+ .open = ima_ascii_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+@@ -313,7 +313,7 @@ static int ima_release_policy(struct ino
+ return 0;
+ }
+
+-static struct file_operations ima_measure_policy_ops = {
++static const struct file_operations ima_measure_policy_ops = {
+ .open = ima_open_policy,
+ .write = ima_write_policy,
+ .release = ima_release_policy
+diff -urNp linux-2.6.31.7/security/Kconfig linux-2.6.31.7/security/Kconfig
+--- linux-2.6.31.7/security/Kconfig 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/security/Kconfig 2009-12-08 17:39:49.678817106 -0500
+@@ -4,6 +4,465 @@
+
+ menu "Security options"
+
++source grsecurity/Kconfig
++
++menu "PaX"
++
++config PAX
++ bool "Enable various PaX features"
++ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC32 || PPC64 || SPARC32 || SPARC64 || X86)
++ help
++ This allows you to enable various PaX features. PaX adds
++ intrusion prevention mechanisms to the kernel that reduce
++ the risks posed by exploitable memory corruption bugs.
++
++menu "PaX Control"
++ depends on PAX
++
++config PAX_SOFTMODE
++ bool 'Support soft mode'
++ help
++ Enabling this option will allow you to run PaX in soft mode, that
++ is, PaX features will not be enforced by default, only on executables
++ marked explicitly. You must also enable PT_PAX_FLAGS support as it
++ is the only way to mark executables for soft mode use.
++
++ Soft mode can be activated by using the "pax_softmode=1" kernel command
++ line option on boot. Furthermore you can control various PaX features
++ at runtime via the entries in /proc/sys/kernel/pax.
++
++config PAX_EI_PAX
++ bool 'Use legacy ELF header marking'
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'chpax' utility available at
++ http://pax.grsecurity.net/. The control flags will be read from
++ an otherwise reserved part of the ELF header. This marking has
++ numerous drawbacks (no support for soft-mode, toolchain does not
++ know about the non-standard use of the ELF header) therefore it
++ has been deprecated in favour of PT_PAX_FLAGS support.
++
++ If you have applications not marked by the PT_PAX_FLAGS ELF
++ program header then you MUST enable this option otherwise they
++ will not get any protection.
++
++ Note that if you enable PT_PAX_FLAGS marking support as well,
++ the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
++
++config PAX_PT_PAX_FLAGS
++ bool 'Use ELF program header marking'
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'paxctl' utility available at
++ http://pax.grsecurity.net/. The control flags will be read from
++ a PaX specific ELF program header (PT_PAX_FLAGS). This marking
++ has the benefits of supporting both soft mode and being fully
++ integrated into the toolchain (the binutils patch is available
++ from http://pax.grsecurity.net).
++
++ If you have applications not marked by the PT_PAX_FLAGS ELF
++ program header then you MUST enable the EI_PAX marking support
++ otherwise they will not get any protection.
++
++ Note that if you enable the legacy EI_PAX marking support as well,
++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
++
++choice
++ prompt 'MAC system integration'
++ default PAX_HAVE_ACL_FLAGS
++ help
++ Mandatory Access Control systems have the option of controlling
++ PaX flags on a per executable basis, choose the method supported
++ by your particular system.
++
++ - "none": if your MAC system does not interact with PaX,
++ - "direct": if your MAC system defines pax_set_initial_flags() itself,
++ - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
++
++ NOTE: this option is for developers/integrators only.
++
++ config PAX_NO_ACL_FLAGS
++ bool 'none'
++
++ config PAX_HAVE_ACL_FLAGS
++ bool 'direct'
++
++ config PAX_HOOK_ACL_FLAGS
++ bool 'hook'
++endchoice
++
++endmenu
++
++menu "Non-executable pages"
++ depends on PAX
++
++config PAX_NOEXEC
++ bool "Enforce non-executable pages"
++ depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || IA64 || MIPS32 || MIPS64 || PARISC || PPC32 || PPC64 || SPARC32 || SPARC64 || X86)
++ help
++ By design some architectures do not allow for protecting memory
++ pages against execution or even if they do, Linux does not make
++ use of this feature. In practice this means that if a page is
++ readable (such as the stack or heap) it is also executable.
++
++ There is a well known exploit technique that makes use of this
++ fact and a common programming mistake where an attacker can
++ introduce code of his choice somewhere in the attacked program's
++ memory (typically the stack or the heap) and then execute it.
++
++ If the attacked program was running with different (typically
++ higher) privileges than that of the attacker, then he can elevate
++ his own privilege level (e.g. get a root shell, write to files to
++ which he does not have write access, etc).
++
++ Enabling this option will let you choose from various features
++ that prevent the injection and execution of 'foreign' code in
++ a program.
++
++ This will also break programs that rely on the old behaviour and
++ expect that memory dynamically allocated via the malloc() family
++ of functions is executable (which it is not). Notable examples
++ are the XFree86 4.x server, the java runtime and wine.
++
++config PAX_PAGEEXEC
++ bool "Paging based non-executable pages"
++ depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
++ help
++ This implementation is based on the paging feature of the CPU.
++ On i386 without hardware non-executable bit support there is a
++ variable but usually low performance impact, however on Intel's
++ P4 core based CPUs it is very high so you should not enable this
++ for kernels meant to be used on such CPUs.
++
++ On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
++ with hardware non-executable bit support there is no performance
++ impact, on ppc the impact is negligible.
++
++ Note that several architectures require various emulations due to
++ badly designed userland ABIs; this will cause a performance impact
++ that will disappear as soon as userland is fixed. For example, ppc
++ userland MUST have been built with secure-plt by a recent toolchain.
++
++config PAX_SEGMEXEC
++ bool "Segmentation based non-executable pages"
++ depends on PAX_NOEXEC && X86_32
++ help
++ This implementation is based on the segmentation feature of the
++ CPU and has a very small performance impact, however applications
++ will be limited to a 1.5 GB address space instead of the normal
++ 3 GB.
++
++config PAX_EMUTRAMP
++ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
++ default y if PARISC
++ help
++ There are some programs and libraries that for one reason or
++ another attempt to execute special small code snippets from
++ non-executable memory pages. Most notable examples are the
++ signal handler return code generated by the kernel itself and
++ the GCC trampolines.
++
++ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
++ such programs will no longer work under your kernel.
++
++ As a remedy you can say Y here and use the 'chpax' or 'paxctl'
++ utilities to enable trampoline emulation for the affected programs
++ yet still have the protection provided by the non-executable pages.
++
++ On parisc you MUST enable this option and EMUSIGRT as well, otherwise
++ your system will not even boot.
++
++ Alternatively you can say N here and use the 'chpax' or 'paxctl'
++ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
++ for the affected files.
++
++ NOTE: enabling this feature *may* open up a loophole in the
++ protection provided by non-executable pages that an attacker
++ could abuse. Therefore the best solution is to not have any
++ files on your system that would require this option. This can
++ be achieved by not using libc5 (which relies on the kernel
++ signal handler return code) and not using or rewriting programs
++ that make use of the nested function implementation of GCC.
++ Skilled users can just fix GCC itself so that it implements
++ nested function calls in a way that does not interfere with PaX.
++
++config PAX_EMUSIGRT
++ bool "Automatically emulate sigreturn trampolines"
++ depends on PAX_EMUTRAMP && PARISC
++ default y
++ help
++ Enabling this option will have the kernel automatically detect
++ and emulate signal return trampolines executing on the stack
++ that would otherwise lead to task termination.
++
++ This solution is intended as a temporary one for users with
++ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
++ Modula-3 runtime, etc) or executables linked to such, basically
++ everything that does not specify its own SA_RESTORER function in
++ normal executable memory like glibc 2.1+ does.
++
++ On parisc you MUST enable this option, otherwise your system will
++ not even boot.
++
++ NOTE: this feature cannot be disabled on a per executable basis
++ and since it *does* open up a loophole in the protection provided
++ by non-executable pages, the best solution is to not have any
++ files on your system that would require this option.
++
++config PAX_MPROTECT
++ bool "Restrict mprotect()"
++ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
++ help
++ Enabling this option will prevent programs from
++ - changing the executable status of memory pages that were
++ not originally created as executable,
++ - making read-only executable pages writable again,
++ - creating executable pages from anonymous memory.
++
++ You should say Y here to complete the protection provided by
++ the enforcement of non-executable pages.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++ this feature on a per file basis.
++
++config PAX_NOELFRELOCS
++ bool "Disallow ELF text relocations"
++ depends on PAX_MPROTECT && !PAX_ETEXECRELOCS && (IA64 || PPC || X86)
++ help
++ Non-executable pages and mprotect() restrictions are effective
++ in preventing the introduction of new executable code into an
++ attacked task's address space. There remain only two venues
++ for this kind of attack: if the attacker can execute already
++ existing code in the attacked task then he can either have it
++ create and mmap() a file containing his code or have it mmap()
++ an already existing ELF library that does not have position
++ independent code in it and use mprotect() on it to make it
++ writable and copy his code there. While protecting against
++ the former approach is beyond PaX, the latter can be prevented
++ by having only PIC ELF libraries on one's system (which do not
++ need to relocate their code). If you are sure this is your case,
++ then enable this option; otherwise be careful, as you may not even
++ be able to boot or log in to your system (for example, some PAM
++ modules are erroneously compiled as non-PIC by default).
++
++ NOTE: if you are using dynamic ELF executables (as suggested
++ when using ASLR) then you must have made sure that you linked
++ your files using the PIC version of crt1 (the et_dyn.tar.gz package
++ referenced there has already been updated to support this).
++
++config PAX_ETEXECRELOCS
++ bool "Allow ELF ET_EXEC text relocations"
++ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
++ default y
++ help
++ On some architectures there are incorrectly created applications
++ that require text relocations and would not work without enabling
++ this option. If you are an alpha, ia64 or parisc user, you should
++ enable this option and disable it once you have made sure that
++ none of your applications need it.
++
++config PAX_EMUPLT
++ bool "Automatically emulate ELF PLT"
++ depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC32 || SPARC64)
++ default y
++ help
++ Enabling this option will have the kernel automatically detect
++ and emulate the Procedure Linkage Table entries in ELF files.
++ On some architectures such entries are in writable memory, and
++ become non-executable leading to task termination. Therefore
++ it is mandatory that you enable this option on alpha, parisc,
++ sparc and sparc64, otherwise your system would not even boot.
++
++ NOTE: this feature *does* open up a loophole in the protection
++ provided by the non-executable pages, therefore the proper
++ solution is to modify the toolchain to produce a PLT that does
++ not need to be writable.
++
++config PAX_DLRESOLVE
++ bool 'Emulate old glibc resolver stub'
++ depends on PAX_EMUPLT && (SPARC32 || SPARC64)
++ default n
++ help
++ This option is needed if userland has an old glibc (before 2.4)
++ that puts a 'save' instruction into the runtime generated resolver
++ stub that needs special emulation.
++
++config PAX_KERNEXEC
++ bool "Enforce non-executable kernel pages"
++ depends on PAX_NOEXEC && X86 && (!X86_32 || X86_WP_WORKS_OK)
++ help
++ This is the kernel land equivalent of PAGEEXEC and MPROTECT,
++ that is, enabling this option will make it harder to inject
++ and execute 'foreign' code in kernel memory itself.
++
++endmenu
++
++menu "Address Space Layout Randomization"
++ depends on PAX
++
++config PAX_ASLR
++ bool "Address Space Layout Randomization"
++ depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
++ help
++ Many if not most exploit techniques rely on the knowledge of
++ certain addresses in the attacked program. The following options
++ will allow the kernel to apply a certain amount of randomization
++ to specific parts of the program thereby forcing an attacker to
++ guess them in most cases. Any failed guess will most likely crash
++ the attacked program, which allows the kernel to detect such attempts
++ and react to them. PaX itself provides no reaction mechanisms,
++ instead it is strongly encouraged that you make use of Nergal's
++ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
++ (http://www.grsecurity.net/) built-in crash detection features or
++ develop one yourself.
++
++ By saying Y here you can choose to randomize the following areas:
++ - top of the task's kernel stack
++ - top of the task's userland stack
++ - base address for mmap() requests that do not specify one
++ (this includes all libraries)
++ - base address of the main executable
++
++ It is strongly recommended to say Y here as address space layout
++ randomization has negligible impact on performance yet it provides
++ a very effective protection.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++ this feature on a per file basis.
++
++config PAX_RANDKSTACK
++ bool "Randomize kernel stack base"
++ depends on PAX_ASLR && X86_TSC && X86_32
++ help
++ By saying Y here the kernel will randomize every task's kernel
++ stack on every system call. This will not only force an attacker
++ to guess it but also prevent him from making use of possible
++ leaked information about it.
++
++ Since the kernel stack is a rather scarce resource, randomization
++ may cause unexpected stack overflows, therefore you should very
++ carefully test your system. Note that once enabled in the kernel
++ configuration, this feature cannot be disabled on a per file basis.
++
++config PAX_RANDUSTACK
++ bool "Randomize user stack base"
++ depends on PAX_ASLR
++ help
++ By saying Y here the kernel will randomize every task's userland
++ stack. The randomization is done in two steps where the second
++ one may apply a large shift to the top of the stack and
++ cause problems for programs that want to use lots of memory (more
++ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
++ For this reason the second step can be controlled by 'chpax' or
++ 'paxctl' on a per file basis.
++
++config PAX_RANDMMAP
++ bool "Randomize mmap() base"
++ depends on PAX_ASLR
++ help
++ By saying Y here the kernel will use a randomized base address for
++ mmap() requests that do not specify one themselves. As a result
++ all dynamically loaded libraries will appear at random addresses
++ and therefore be harder to exploit by a technique where an attacker
++ attempts to execute library code for his purposes (e.g. spawn a
++ shell from an exploited program that is running at an elevated
++ privilege level).
++
++ Furthermore, if a program is relinked as a dynamic ELF file, its
++ base address will be randomized as well, completing the full
++ randomization of the address space layout. Attacking such programs
++ becomes a guessing game. You can find an example of doing this at
++ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
++ http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
++ feature on a per file basis.
++
++endmenu
++
++menu "Miscellaneous hardening features"
++
++config PAX_MEMORY_SANITIZE
++ bool "Sanitize all freed memory"
++ help
++ By saying Y here the kernel will erase memory pages as soon as they
++ are freed. This in turn reduces the lifetime of data stored in the
++ pages, making it less likely that sensitive information such as
++ passwords, cryptographic secrets, etc stay in memory for too long.
++
++ This is especially useful for programs whose runtime is short; long
++ lived processes and the kernel itself also benefit from this as long as
++ they operate on whole memory pages and ensure timely freeing of pages
++ that may hold sensitive information.
++
++ The tradeoff is a performance impact: on a single CPU system kernel
++ compilation sees a 3% slowdown; other systems and workloads may vary,
++ and you are advised to test this feature on your expected workload
++ before deploying it.
++
++ Note that this feature does not protect data stored in live pages,
++ e.g., process memory swapped to disk may stay there for a long time.
++
++config PAX_MEMORY_UDEREF
++ bool "Prevent invalid userland pointer dereference"
++ depends on X86_32 && !UML_X86
++ help
++ By saying Y here the kernel will be prevented from dereferencing
++ userland pointers in contexts where the kernel expects only kernel
++ pointers. This is both a useful runtime debugging feature and a
++ security measure that prevents exploiting a class of kernel bugs.
++
++ The tradeoff is that some virtualization solutions may experience
++ a huge slowdown and therefore you should not enable this feature
++ for kernels meant to run in such environments. Whether a given VM
++ solution is affected or not is best determined by simply trying it
++ out, the performance impact will be obvious right on boot as this
++ mechanism engages from very early on. A good rule of thumb is that
++ VMs running on CPUs without hardware virtualization support (i.e.,
++ the majority of IA-32 CPUs) will likely experience the slowdown.
++
++config PAX_REFCOUNT
++ bool "Prevent various kernel object reference counter overflows"
++ depends on GRKERNSEC && (X86 || SPARC64)
++ help
++ By saying Y here the kernel will detect and prevent overflowing
++ various (but not all) kinds of object reference counters. Such
++ overflows can normally occur due to bugs only and are often, if
++ not always, exploitable.
++
++ The tradeoff is that data structures protected by an overflowed
++ refcount will never be freed and therefore will leak memory. Note
++ that this leak also happens even without this protection but in
++ that case the overflow can eventually trigger the freeing of the
++ data structure while it is still being used elsewhere, resulting
++ in the exploitable situation that this feature prevents.
++
++ Since this has a negligible performance impact, you should enable
++ this feature.
++
++config PAX_USERCOPY
++ bool "Bounds check heap object copies between kernel and userland"
++ depends on X86 || PPC32 || PPC64 || SPARC32 || SPARC64
++ depends on GRKERNSEC && (SLAB || SLUB || SLOB)
++ help
++ By saying Y here the kernel will enforce the size of heap objects
++ when they are copied in either direction between the kernel and
++ userland, even if only a part of the heap object is copied.
++
++ Specifically, this checking prevents information leaking from the
++ kernel heap during kernel to userland copies (if the kernel heap
++ object is otherwise fully initialized) and prevents kernel heap
++ overflows during userland to kernel copies.
++
++ Note that the current implementation provides the strictest checks
++ for the SLUB allocator.
++
++ Since this has a negligible performance impact, you should enable
++ this feature.
++endmenu
++
++endmenu
++
+ config KEYS
+ bool "Enable access key retention support"
+ help
+diff -urNp linux-2.6.31.7/security/min_addr.c linux-2.6.31.7/security/min_addr.c
+--- linux-2.6.31.7/security/min_addr.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/security/min_addr.c 2009-12-08 17:39:49.679913731 -0500
+@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
+ */
+ static void update_mmap_min_addr(void)
+ {
++#ifndef SPARC
+ #ifdef CONFIG_LSM_MMAP_MIN_ADDR
+ if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
+ mmap_min_addr = dac_mmap_min_addr;
+@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
+ #else
+ mmap_min_addr = dac_mmap_min_addr;
+ #endif
++#endif
+ }
+
+ /*
+@@ -33,6 +35,9 @@ int mmap_min_addr_handler(struct ctl_tab
+ {
+ int ret;
+
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
+ ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+
+ update_mmap_min_addr();
+diff -urNp linux-2.6.31.7/security/smack/smackfs.c linux-2.6.31.7/security/smack/smackfs.c
+--- linux-2.6.31.7/security/smack/smackfs.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/security/smack/smackfs.c 2009-12-08 17:39:49.679913731 -0500
+@@ -187,7 +187,7 @@ static void load_seq_stop(struct seq_fil
+ /* No-op */
+ }
+
+-static struct seq_operations load_seq_ops = {
++static const struct seq_operations load_seq_ops = {
+ .start = load_seq_start,
+ .next = load_seq_next,
+ .show = load_seq_show,
+@@ -503,7 +503,7 @@ static void cipso_seq_stop(struct seq_fi
+ /* No-op */
+ }
+
+-static struct seq_operations cipso_seq_ops = {
++static const struct seq_operations cipso_seq_ops = {
+ .start = cipso_seq_start,
+ .stop = cipso_seq_stop,
+ .next = cipso_seq_next,
+@@ -697,7 +697,7 @@ static void netlbladdr_seq_stop(struct s
+ /* No-op */
+ }
+
+-static struct seq_operations netlbladdr_seq_ops = {
++static const struct seq_operations netlbladdr_seq_ops = {
+ .start = netlbladdr_seq_start,
+ .stop = netlbladdr_seq_stop,
+ .next = netlbladdr_seq_next,
+diff -urNp linux-2.6.31.7/sound/aoa/codecs/onyx.c linux-2.6.31.7/sound/aoa/codecs/onyx.c
+--- linux-2.6.31.7/sound/aoa/codecs/onyx.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/aoa/codecs/onyx.c 2009-12-08 17:39:49.695527389 -0500
+@@ -53,7 +53,7 @@ struct onyx {
+ spdif_locked:1,
+ analog_locked:1,
+ original_mute:2;
+- int open_count;
++ atomic_t open_count;
+ struct codec_info *codec_info;
+
+ /* mutex serializes concurrent access to the device
+@@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
+ struct onyx *onyx = cii->codec_data;
+
+ mutex_lock(&onyx->mutex);
+- onyx->open_count++;
++ atomic_inc(&onyx->open_count);
+ mutex_unlock(&onyx->mutex);
+
+ return 0;
+@@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
+ struct onyx *onyx = cii->codec_data;
+
+ mutex_lock(&onyx->mutex);
+- onyx->open_count--;
+- if (!onyx->open_count)
++ if (atomic_dec_and_test(&onyx->open_count))
+ onyx->spdif_locked = onyx->analog_locked = 0;
+ mutex_unlock(&onyx->mutex);
+
+diff -urNp linux-2.6.31.7/sound/core/oss/pcm_oss.c linux-2.6.31.7/sound/core/oss/pcm_oss.c
+--- linux-2.6.31.7/sound/core/oss/pcm_oss.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/core/oss/pcm_oss.c 2009-12-08 17:39:49.695527389 -0500
+@@ -2943,8 +2943,8 @@ static void snd_pcm_oss_proc_done(struct
+ }
+ }
+ #else /* !CONFIG_SND_VERBOSE_PROCFS */
+-#define snd_pcm_oss_proc_init(pcm)
+-#define snd_pcm_oss_proc_done(pcm)
++#define snd_pcm_oss_proc_init(pcm) do {} while (0)
++#define snd_pcm_oss_proc_done(pcm) do {} while (0)
+ #endif /* CONFIG_SND_VERBOSE_PROCFS */
+
+ /*
+diff -urNp linux-2.6.31.7/sound/core/seq/seq_lock.h linux-2.6.31.7/sound/core/seq/seq_lock.h
+--- linux-2.6.31.7/sound/core/seq/seq_lock.h 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/core/seq/seq_lock.h 2009-12-08 17:39:49.696810303 -0500
+@@ -23,10 +23,10 @@ void snd_use_lock_sync_helper(snd_use_lo
+ #else /* SMP || CONFIG_SND_DEBUG */
+
+ typedef spinlock_t snd_use_lock_t; /* dummy */
+-#define snd_use_lock_init(lockp) /**/
+-#define snd_use_lock_use(lockp) /**/
+-#define snd_use_lock_free(lockp) /**/
+-#define snd_use_lock_sync(lockp) /**/
++#define snd_use_lock_init(lockp) do {} while (0)
++#define snd_use_lock_use(lockp) do {} while (0)
++#define snd_use_lock_free(lockp) do {} while (0)
++#define snd_use_lock_sync(lockp) do {} while (0)
+
+ #endif /* SMP || CONFIG_SND_DEBUG */
+
+diff -urNp linux-2.6.31.7/sound/drivers/mts64.c linux-2.6.31.7/sound/drivers/mts64.c
+--- linux-2.6.31.7/sound/drivers/mts64.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/drivers/mts64.c 2009-12-08 17:39:49.696810303 -0500
+@@ -65,7 +65,7 @@ struct mts64 {
+ struct pardevice *pardev;
+ int pardev_claimed;
+
+- int open_count;
++ atomic_t open_count;
+ int current_midi_output_port;
+ int current_midi_input_port;
+ u8 mode[MTS64_NUM_INPUT_PORTS];
+@@ -695,7 +695,7 @@ static int snd_mts64_rawmidi_open(struct
+ {
+ struct mts64 *mts = substream->rmidi->private_data;
+
+- if (mts->open_count == 0) {
++ if (atomic_read(&mts->open_count) == 0) {
+ /* We don't need a spinlock here, because this is just called
+ if the device has not been opened before.
+ So there aren't any IRQs from the device */
+@@ -703,7 +703,7 @@ static int snd_mts64_rawmidi_open(struct
+
+ msleep(50);
+ }
+- ++(mts->open_count);
++ atomic_inc(&mts->open_count);
+
+ return 0;
+ }
+@@ -713,8 +713,7 @@ static int snd_mts64_rawmidi_close(struc
+ struct mts64 *mts = substream->rmidi->private_data;
+ unsigned long flags;
+
+- --(mts->open_count);
+- if (mts->open_count == 0) {
++ if (atomic_dec_return(&mts->open_count) == 0) {
+ /* We need the spinlock_irqsave here because we can still
+ have IRQs at this point */
+ spin_lock_irqsave(&mts->lock, flags);
+@@ -723,8 +722,8 @@ static int snd_mts64_rawmidi_close(struc
+
+ msleep(500);
+
+- } else if (mts->open_count < 0)
+- mts->open_count = 0;
++ } else if (atomic_read(&mts->open_count) < 0)
++ atomic_set(&mts->open_count, 0);
+
+ return 0;
+ }
+diff -urNp linux-2.6.31.7/sound/drivers/portman2x4.c linux-2.6.31.7/sound/drivers/portman2x4.c
+--- linux-2.6.31.7/sound/drivers/portman2x4.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/drivers/portman2x4.c 2009-12-08 17:39:49.696810303 -0500
+@@ -83,7 +83,7 @@ struct portman {
+ struct pardevice *pardev;
+ int pardev_claimed;
+
+- int open_count;
++ atomic_t open_count;
+ int mode[PORTMAN_NUM_INPUT_PORTS];
+ struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
+ };
+diff -urNp linux-2.6.31.7/sound/pci/ac97/ac97_codec.c linux-2.6.31.7/sound/pci/ac97/ac97_codec.c
+--- linux-2.6.31.7/sound/pci/ac97/ac97_codec.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/pci/ac97/ac97_codec.c 2009-12-08 17:39:49.697905873 -0500
+@@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
+ }
+
+ /* build_ops to do nothing */
+-static struct snd_ac97_build_ops null_build_ops;
++static const struct snd_ac97_build_ops null_build_ops __read_only;
+
+ #ifdef CONFIG_SND_AC97_POWER_SAVE
+ static void do_update_power(struct work_struct *work)
+diff -urNp linux-2.6.31.7/sound/pci/ac97/ac97_patch.c linux-2.6.31.7/sound/pci/ac97/ac97_patch.c
+--- linux-2.6.31.7/sound/pci/ac97/ac97_patch.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/pci/ac97/ac97_patch.c 2009-12-08 17:39:49.699903046 -0500
+@@ -1501,7 +1501,7 @@ static const struct snd_ac97_res_table a
+ { AC97_VIDEO, 0x9f1f },
+ { AC97_AUX, 0x9f1f },
+ { AC97_PCM, 0x9f1f },
+- { } /* terminator */
++ { 0, 0 } /* terminator */
+ };
+
+ static int patch_ad1819(struct snd_ac97 * ac97)
+@@ -3876,7 +3876,7 @@ static struct snd_ac97_res_table lm4550_
+ { AC97_AUX, 0x1f1f },
+ { AC97_PCM, 0x1f1f },
+ { AC97_REC_GAIN, 0x0f0f },
+- { } /* terminator */
++ { 0, 0 } /* terminator */
+ };
+
+ static int patch_lm4550(struct snd_ac97 *ac97)
+diff -urNp linux-2.6.31.7/sound/pci/ens1370.c linux-2.6.31.7/sound/pci/ens1370.c
+--- linux-2.6.31.7/sound/pci/ens1370.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/pci/ens1370.c 2009-12-08 17:39:49.699903046 -0500
+@@ -452,7 +452,7 @@ static struct pci_device_id snd_audiopci
+ { PCI_VDEVICE(ENSONIQ, 0x5880), 0, }, /* ES1373 - CT5880 */
+ { PCI_VDEVICE(ECTIVA, 0x8938), 0, }, /* Ectiva EV1938 */
+ #endif
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(pci, snd_audiopci_ids);
+diff -urNp linux-2.6.31.7/sound/pci/intel8x0.c linux-2.6.31.7/sound/pci/intel8x0.c
+--- linux-2.6.31.7/sound/pci/intel8x0.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/pci/intel8x0.c 2009-12-08 17:39:49.700909646 -0500
+@@ -444,7 +444,7 @@ static struct pci_device_id snd_intel8x0
+ { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */
+ { PCI_VDEVICE(AMD, 0x7445), DEVICE_INTEL }, /* AMD768 */
+ { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(pci, snd_intel8x0_ids);
+@@ -2105,7 +2105,7 @@ static struct ac97_quirk ac97_quirks[] _
+ .type = AC97_TUNE_HP_ONLY
+ },
+ #endif
+- { } /* terminator */
++ { 0, 0, 0, 0, NULL, 0 } /* terminator */
+ };
+
+ static int __devinit snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock,
+diff -urNp linux-2.6.31.7/sound/pci/intel8x0m.c linux-2.6.31.7/sound/pci/intel8x0m.c
+--- linux-2.6.31.7/sound/pci/intel8x0m.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/pci/intel8x0m.c 2009-12-08 17:39:49.701914331 -0500
+@@ -239,7 +239,7 @@ static struct pci_device_id snd_intel8x0
+ { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */
+ { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */
+ #endif
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(pci, snd_intel8x0m_ids);
+@@ -1264,7 +1264,7 @@ static struct shortname_table {
+ { 0x5455, "ALi M5455" },
+ { 0x746d, "AMD AMD8111" },
+ #endif
+- { 0 },
++ { 0, NULL },
+ };
+
+ static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
+diff -urNp linux-2.6.31.7/sound/usb/usx2y/us122l.c linux-2.6.31.7/sound/usb/usx2y/us122l.c
+--- linux-2.6.31.7/sound/usb/usx2y/us122l.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/usb/usx2y/us122l.c 2009-12-08 17:39:49.702907609 -0500
+@@ -154,7 +154,7 @@ static void usb_stream_hwdep_vm_close(st
+ snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count));
+ }
+
+-static struct vm_operations_struct usb_stream_hwdep_vm_ops = {
++static const struct vm_operations_struct usb_stream_hwdep_vm_ops = {
+ .open = usb_stream_hwdep_vm_open,
+ .fault = usb_stream_hwdep_vm_fault,
+ .close = usb_stream_hwdep_vm_close,
+diff -urNp linux-2.6.31.7/sound/usb/usx2y/usX2Yhwdep.c linux-2.6.31.7/sound/usb/usx2y/usX2Yhwdep.c
+--- linux-2.6.31.7/sound/usb/usx2y/usX2Yhwdep.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/usb/usx2y/usX2Yhwdep.c 2009-12-08 17:39:49.702907609 -0500
+@@ -53,7 +53,7 @@ static int snd_us428ctls_vm_fault(struct
+ return 0;
+ }
+
+-static struct vm_operations_struct us428ctls_vm_ops = {
++static const struct vm_operations_struct us428ctls_vm_ops = {
+ .fault = snd_us428ctls_vm_fault,
+ };
+
+diff -urNp linux-2.6.31.7/sound/usb/usx2y/usx2yhwdeppcm.c linux-2.6.31.7/sound/usb/usx2y/usx2yhwdeppcm.c
+--- linux-2.6.31.7/sound/usb/usx2y/usx2yhwdeppcm.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/sound/usb/usx2y/usx2yhwdeppcm.c 2009-12-08 17:39:49.702907609 -0500
+@@ -697,7 +697,7 @@ static int snd_usX2Y_hwdep_pcm_vm_fault(
+ }
+
+
+-static struct vm_operations_struct snd_usX2Y_hwdep_pcm_vm_ops = {
++static const struct vm_operations_struct snd_usX2Y_hwdep_pcm_vm_ops = {
+ .open = snd_usX2Y_hwdep_pcm_vm_open,
+ .close = snd_usX2Y_hwdep_pcm_vm_close,
+ .fault = snd_usX2Y_hwdep_pcm_vm_fault,
+diff -urNp linux-2.6.31.7/usr/gen_init_cpio.c linux-2.6.31.7/usr/gen_init_cpio.c
+--- linux-2.6.31.7/usr/gen_init_cpio.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/usr/gen_init_cpio.c 2009-12-08 17:39:49.703907371 -0500
+@@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
+ int retval;
+ int rc = -1;
+ int namesize;
+- int i;
++ unsigned int i;
+
+ mode |= S_IFREG;
+
+@@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
+ *env_var = *expanded = '\0';
+ strncat(env_var, start + 2, end - start - 2);
+ strncat(expanded, new_location, start - new_location);
+- strncat(expanded, getenv(env_var), PATH_MAX);
+- strncat(expanded, end + 1, PATH_MAX);
++ strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
++ strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
+ strncpy(new_location, expanded, PATH_MAX);
++ new_location[PATH_MAX] = 0;
+ } else
+ break;
+ }
+diff -urNp linux-2.6.31.7/virt/kvm/kvm_main.c linux-2.6.31.7/virt/kvm/kvm_main.c
+--- linux-2.6.31.7/virt/kvm/kvm_main.c 2009-11-09 19:32:31.000000000 -0500
++++ linux-2.6.31.7/virt/kvm/kvm_main.c 2009-12-08 17:39:49.703907371 -0500
+@@ -2353,6 +2353,9 @@ static struct miscdevice kvm_dev = {
+ KVM_MINOR,
+ "kvm",
+ &kvm_chardev_ops,
++ {NULL, NULL},
++ NULL,
++ NULL
+ };
+
+ static void hardware_enable(void *junk)
+@@ -2512,7 +2515,7 @@ static int vcpu_stat_get(void *_offset,
+
+ DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
+
+-static struct file_operations *stat_fops[] = {
++static const struct file_operations *stat_fops[] = {
+ [KVM_STAT_VCPU] = &vcpu_stat_fops,
+ [KVM_STAT_VM] = &vm_stat_fops,
+ };
+@@ -2584,7 +2587,7 @@ static void kvm_sched_out(struct preempt
+ kvm_arch_vcpu_put(vcpu);
+ }
+
+-int kvm_init(void *opaque, unsigned int vcpu_size,
++int kvm_init(const void *opaque, unsigned int vcpu_size,
+ struct module *module)
+ {
+ int r;