author     Natanael Copa <ncopa@alpinelinux.org>  2009-12-09 08:36:22 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>  2009-12-09 08:36:22 +0000
commit     397c7e2b8e1bd9284336357d27f6e5611e5d041a (patch)
tree       b00ab1e61b490c633704971e13625bfb60062b4b
parent     df87584e85ac4305f8fa1f69a9bf47b9867a43b0 (diff)
download   aports-397c7e2b8e1bd9284336357d27f6e5611e5d041a.tar.bz2
           aports-397c7e2b8e1bd9284336357d27f6e5611e5d041a.tar.xz
testing/linux-grsec: upgrade to 2.6.30.10
-rw-r--r--  testing/linux-grsec/APKBUILD                                            126
-rw-r--r--  testing/linux-grsec/grsecurity-2.1.14-2.6.30.8-200909262311.patch     45257
-rw-r--r--  testing/linux-grsec/kernelconfig                                       4449
-rw-r--r--  testing/linux-grsec/net-next-2.6.git-5ef12d98a19254ee5dc851bd83e214b43ec1f725.patch     96
4 files changed, 49928 insertions, 0 deletions
diff --git a/testing/linux-grsec/APKBUILD b/testing/linux-grsec/APKBUILD
new file mode 100644
index 00000000..d8427cd9
--- /dev/null
+++ b/testing/linux-grsec/APKBUILD
@@ -0,0 +1,126 @@
+# Maintainer: Natanael Copa <ncopa@alpinelinux.org>
+
+_flavor=grsec
+pkgname=linux-${_flavor}
+pkgver=2.6.30.10
+_kernver=2.6.30
+pkgrel=0
+pkgdesc="Linux kernel with grsecurity"
+url=http://grsecurity.net
+depends="mkinitfs linux-firmware"
+makedepends="perl installkernel"
+_config=${config:-kernelconfig}
+install=
+source="ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-$_kernver.tar.bz2
+ ftp://ftp.kernel.org/pub/linux/kernel/v2.6/patch-$pkgver.bz2
+ grsecurity-2.1.14-2.6.30.8-200909262311.patch
+ net-next-2.6.git-5ef12d98a19254ee5dc851bd83e214b43ec1f725.patch
+ $_config
+ "
+subpackages="$pkgname-dev linux-firmware:firmware"
+license="GPL-2"
+
+_abi_release=${pkgver}-${_flavor}
+
+_prepare() {
+ cd "$srcdir"/linux-$_kernver
+ if [ "$_kernver" != "$pkgver" ]; then
+ bunzip2 -c < ../patch-$pkgver.bz2 | patch -p1 -N || return 1
+ fi
+
+ for i in ../*.diff ../*.patch; do
+ [ -f $i ] || continue
+ msg "Applying $i..."
+ patch -p1 -N < $i || return 1
+ done
+
+ mkdir -p "$srcdir"/build
+ cp "$srcdir"/$_config "$srcdir"/build/.config
+ make -C "$srcdir"/linux-$_kernver O="$srcdir"/build HOSTCC="$CC" \
+ silentoldconfig
+}
+
+# this is so we can do: 'abuild menuconfig' to reconfigure the kernel
+menuconfig() {
+ _prepare
+ cd "$srcdir"/build
+ make menuconfig
+ cp .config "$startdir"/$_config
+}
+
+build() {
+ _prepare || return 1
+ cd "$srcdir"/build
+ make CC="$CC" || return 1
+
+ mkdir -p "$pkgdir"/boot "$pkgdir"/lib/modules
+ make modules_install install \
+ INSTALL_MOD_PATH="$pkgdir" \
+ INSTALL_PATH="$pkgdir"/boot
+
+# ln -s vmlinuz-${_abi_release} "${pkgdir}"/boot/$_flavor
+
+ rm -f "$pkgdir"/lib/modules/${_abi_release}/build \
+ "$pkgdir"/lib/modules/${_abi_release}/source
+ install -D include/config/kernel.release \
+ "$pkgdir"/usr/share/kernel/$_flavor/kernel.release
+}
+
+dev() {
+	# copy only the parts that we really need to build 3rd party
+	# kernel modules and install those as /usr/src/linux-headers,
+	# similar to what ubuntu does
+	#
+	# this way you don't need to install the 300-400 MB of kernel
+	# sources to build a tiny kernel module
+ #
+	pkgdesc="Headers and scripts for third party modules for grsec kernel"
+ local dir="$subpkgdir"/usr/src/linux-headers-${_abi_release}
+
+ # first we import config, run prepare to set up for building
+ # external modules, and create the scripts
+ mkdir -p "$dir"
+ cp "$srcdir"/kernelconfig "$dir"/.config
+ make -j1 -C "$srcdir"/linux-$_kernver O="$dir" HOSTCC="$CC" \
+ silentoldconfig prepare scripts
+
+	# remove the stuff that points to the real sources. we want 3rd
+	# party modules to believe this is the source tree
+ rm "$dir"/Makefile "$dir"/source
+
+ # copy the needed stuff from real sources
+ #
+ # this is taken from ubuntu kernel build script
+ # http://kernel.ubuntu.com/git?p=ubuntu/ubuntu-jaunty.git;a=blob;f=debian/rules.d/3-binary-indep.mk;hb=HEAD
+ cd "$srcdir"/linux-$_kernver
+ find . -path './include/*' -prune -o -path './scripts/*' -prune \
+ -o -type f \( -name 'Makefile*' -o -name 'Kconfig*' \
+ -o -name 'Kbuild*' -o -name '*.sh' -o -name '*.pl' \
+ -o -name '*.lds' \) | cpio -pdm "$dir"
+ cp -a drivers/media/dvb/dvb-core/*.h "$dir"/drivers/media/dvb/dvb-core
+ cp -a drivers/media/video/*.h "$dir"/drivers/media/video
+ cp -a drivers/media/dvb/frontends/*.h "$dir"/drivers/media/dvb/frontends
+ cp -a scripts include "$dir"
+ find $(find arch -name include -type d -print) -type f \
+ | cpio -pdm "$dir"
+
+ install -Dm644 "$srcdir"/build/Module.symvers \
+ "$dir"/Module.symvers
+
+ mkdir -p "$subpkgdir"/lib/modules/${_abi_release}
+ ln -sf /usr/src/linux-headers-${_abi_release} \
+ "$subpkgdir"/lib/modules/${_abi_release}/build
+}
+
+firmware() {
+ pkgdesc="Firmware for linux kernel"
+ replaces="linux-grsec linux-vserver"
+ mkdir -p "$subpkgdir"/lib
+ mv "$pkgdir"/lib/firmware "$subpkgdir"/lib/
+}
+
+md5sums="7a80058a6382e5108cdb5554d1609615 linux-2.6.30.tar.bz2
+6485fe0cf0f0220493647505bfd2f7b0 patch-2.6.30.10.bz2
+287a382cfb72043867d8092996875f5d grsecurity-2.1.14-2.6.30.8-200909262311.patch
+ca05fd252783b82e01610e775cf56498 net-next-2.6.git-5ef12d98a19254ee5dc851bd83e214b43ec1f725.patch
+9f41d910914f5a516072f0aa500fa117 kernelconfig"
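
A minimal usage sketch for the APKBUILD above, assuming a standard abuild setup in an aports checkout (these commands are illustrative, not part of the commit):

    cd aports/testing/linux-grsec
    abuild checksum     # regenerate the md5sums= block after changing source=
    abuild menuconfig   # runs the menuconfig() function above and copies .config back as kernelconfig
    abuild -r           # fetch sources, apply the patches listed in source=, build and package

The second command works because abuild can run a function defined in the APKBUILD by name, which is what the "this is so we can do: 'abuild menuconfig'" comment relies on.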
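The dev() function builds a headers subpackage so third-party modules compile without the full kernel tree, exposed through the /lib/modules/${_abi_release}/build symlink it creates. A hypothetical out-of-tree module build against it (module name and directory are invented; the make invocation is the standard kbuild external-module idiom):

    apk add linux-grsec-dev
    cd ~/hello-mod        # contains hello.c and a Makefile with: obj-m += hello.o
    make -C /lib/modules/2.6.30.10-grsec/build M=$PWD modules

Here 2.6.30.10-grsec is ${pkgver}-${_flavor}, the _abi_release used for both the linux-headers directory and the build symlink.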
diff --git a/testing/linux-grsec/grsecurity-2.1.14-2.6.30.8-200909262311.patch b/testing/linux-grsec/grsecurity-2.1.14-2.6.30.8-200909262311.patch
new file mode 100644
index 00000000..42b777eb
--- /dev/null
+++ b/testing/linux-grsec/grsecurity-2.1.14-2.6.30.8-200909262311.patch
@@ -0,0 +1,45257 @@
+diff -urNp linux-2.6.30.8/arch/alpha/include/asm/atomic.h linux-2.6.30.8/arch/alpha/include/asm/atomic.h
+--- linux-2.6.30.8/arch/alpha/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/alpha/include/asm/atomic.h 2009-07-30 09:48:09.872868955 -0400
+@@ -246,6 +246,9 @@ static __inline__ int atomic64_add_unles
+ #define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
+
+ #define atomic_inc(v) atomic_add(1,(v))
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#define atomic_add_unchecked(i,v) atomic_add((i),(v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i),(v))
+ #define atomic64_inc(v) atomic64_add(1,(v))
+
+ #define atomic_dec(v) atomic_sub(1,(v))
+diff -urNp linux-2.6.30.8/arch/alpha/include/asm/elf.h linux-2.6.30.8/arch/alpha/include/asm/elf.h
+--- linux-2.6.30.8/arch/alpha/include/asm/elf.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/alpha/include/asm/elf.h 2009-07-30 09:48:09.873636524 -0400
+@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff -urNp linux-2.6.30.8/arch/alpha/include/asm/kmap_types.h linux-2.6.30.8/arch/alpha/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/alpha/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/alpha/include/asm/kmap_types.h 2009-07-30 09:48:09.873636524 -0400
+@@ -24,7 +24,8 @@ D(9) KM_IRQ0,
+ D(10) KM_IRQ1,
+ D(11) KM_SOFTIRQ0,
+ D(12) KM_SOFTIRQ1,
+-D(13) KM_TYPE_NR
++D(13) KM_CLEARPAGE,
++D(14) KM_TYPE_NR
+ };
+
+ #undef D
+diff -urNp linux-2.6.30.8/arch/alpha/include/asm/pgtable.h linux-2.6.30.8/arch/alpha/include/asm/pgtable.h
+--- linux-2.6.30.8/arch/alpha/include/asm/pgtable.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/alpha/include/asm/pgtable.h 2009-07-30 09:48:09.874706218 -0400
+@@ -101,6 +101,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff -urNp linux-2.6.30.8/arch/alpha/kernel/module.c linux-2.6.30.8/arch/alpha/kernel/module.c
+--- linux-2.6.30.8/arch/alpha/kernel/module.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/alpha/kernel/module.c 2009-07-30 09:48:09.875723461 -0400
+@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+- gp = (u64)me->module_core + me->core_size - 0x8000;
++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+diff -urNp linux-2.6.30.8/arch/alpha/kernel/osf_sys.c linux-2.6.30.8/arch/alpha/kernel/osf_sys.c
+--- linux-2.6.30.8/arch/alpha/kernel/osf_sys.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/alpha/kernel/osf_sys.c 2009-07-30 09:48:09.875723461 -0400
+@@ -1215,6 +1215,10 @@ arch_get_unmapped_area(struct file *filp
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ if (addr != (unsigned long) -ENOMEM)
+@@ -1222,8 +1226,8 @@ arch_get_unmapped_area(struct file *filp
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+diff -urNp linux-2.6.30.8/arch/alpha/mm/fault.c linux-2.6.30.8/arch/alpha/mm/fault.c
+--- linux-2.6.30.8/arch/alpha/mm/fault.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/alpha/mm/fault.c 2009-07-30 09:48:09.876636955 -0400
+@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long *)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++ do_group_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff -urNp linux-2.6.30.8/arch/arm/include/asm/atomic.h linux-2.6.30.8/arch/arm/include/asm/atomic.h
+--- linux-2.6.30.8/arch/arm/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/arm/include/asm/atomic.h 2009-07-30 09:48:09.876636955 -0400
+@@ -235,6 +235,9 @@ static inline int atomic_add_unless(atom
+
+ #define atomic_inc(v) atomic_add(1, v)
+ #define atomic_dec(v) atomic_sub(1, v)
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#define atomic_add_unchecked(i, v) atomic_add(i, v)
++#define atomic_sub_unchecked(i, v) atomic_sub(i, v)
+
+ #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+ #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+diff -urNp linux-2.6.30.8/arch/arm/include/asm/elf.h linux-2.6.30.8/arch/arm/include/asm/elf.h
+--- linux-2.6.30.8/arch/arm/include/asm/elf.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/arm/include/asm/elf.h 2009-07-30 09:48:09.877630671 -0400
+@@ -103,7 +103,14 @@ extern int arm_elf_read_implies_exec(con
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+diff -urNp linux-2.6.30.8/arch/arm/include/asm/kmap_types.h linux-2.6.30.8/arch/arm/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/arm/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/arm/include/asm/kmap_types.h 2009-07-30 09:48:09.878525050 -0400
+@@ -19,6 +19,7 @@ enum km_type {
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+ KM_L2_CACHE,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.30.8/arch/arm/include/asm/uaccess.h linux-2.6.30.8/arch/arm/include/asm/uaccess.h
+--- linux-2.6.30.8/arch/arm/include/asm/uaccess.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/arm/include/asm/uaccess.h 2009-07-30 09:48:09.878525050 -0400
+@@ -398,6 +398,9 @@ extern unsigned long __must_check __strn
+
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else /* security hole - plug it */
+@@ -407,6 +410,9 @@ static inline unsigned long __must_check
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+diff -urNp linux-2.6.30.8/arch/arm/mach-ns9xxx/clock.c linux-2.6.30.8/arch/arm/mach-ns9xxx/clock.c
+--- linux-2.6.30.8/arch/arm/mach-ns9xxx/clock.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/arm/mach-ns9xxx/clock.c 2009-07-30 09:48:09.879705308 -0400
+@@ -195,7 +195,7 @@ static int clk_debugfs_open(struct inode
+ return single_open(file, clk_debugfs_show, NULL);
+ }
+
+-static struct file_operations clk_debugfs_operations = {
++static const struct file_operations clk_debugfs_operations = {
+ .open = clk_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.30.8/arch/arm/mm/mmap.c linux-2.6.30.8/arch/arm/mm/mmap.c
+--- linux-2.6.30.8/arch/arm/mm/mmap.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/arm/mm/mmap.c 2009-07-30 09:48:09.881684524 -0400
+@@ -62,6 +62,10 @@ arch_get_unmapped_area(struct file *filp
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -74,10 +78,10 @@ arch_get_unmapped_area(struct file *filp
+ return addr;
+ }
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -93,8 +97,8 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+diff -urNp linux-2.6.30.8/arch/avr32/include/asm/atomic.h linux-2.6.30.8/arch/avr32/include/asm/atomic.h
+--- linux-2.6.30.8/arch/avr32/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/avr32/include/asm/atomic.h 2009-07-30 09:48:09.881684524 -0400
+@@ -176,9 +176,12 @@ static inline int atomic_sub_if_positive
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+
+ #define atomic_sub(i, v) (void)atomic_sub_return(i, v)
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
+ #define atomic_add(i, v) (void)atomic_add_return(i, v)
++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
+ #define atomic_dec(v) atomic_sub(1, (v))
+ #define atomic_inc(v) atomic_add(1, (v))
++#define atomic_inc_unchecked(v) atomic_inc(v)
+
+ #define atomic_dec_return(v) atomic_sub_return(1, v)
+ #define atomic_inc_return(v) atomic_add_return(1, v)
+diff -urNp linux-2.6.30.8/arch/avr32/include/asm/elf.h linux-2.6.30.8/arch/avr32/include/asm/elf.h
+--- linux-2.6.30.8/arch/avr32/include/asm/elf.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/avr32/include/asm/elf.h 2009-07-30 09:48:09.881684524 -0400
+@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff -urNp linux-2.6.30.8/arch/avr32/include/asm/kmap_types.h linux-2.6.30.8/arch/avr32/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/avr32/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/avr32/include/asm/kmap_types.h 2009-07-30 09:48:09.882650296 -0400
+@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
+ D(11) KM_IRQ1,
+ D(12) KM_SOFTIRQ0,
+ D(13) KM_SOFTIRQ1,
+-D(14) KM_TYPE_NR
++D(14) KM_CLEARPAGE,
++D(15) KM_TYPE_NR
+ };
+
+ #undef D
+diff -urNp linux-2.6.30.8/arch/avr32/mm/fault.c linux-2.6.30.8/arch/avr32/mm/fault.c
+--- linux-2.6.30.8/arch/avr32/mm/fault.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/avr32/mm/fault.c 2009-07-30 09:48:09.882650296 -0400
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
+
+ int exception_trace = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+@@ -157,6 +174,16 @@ bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ if (exception_trace && printk_ratelimit())
+ printk("%s%s[%d]: segfault at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+diff -urNp linux-2.6.30.8/arch/blackfin/include/asm/atomic.h linux-2.6.30.8/arch/blackfin/include/asm/atomic.h
+--- linux-2.6.30.8/arch/blackfin/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/blackfin/include/asm/atomic.h 2009-07-30 09:48:09.882650296 -0400
+@@ -178,6 +178,9 @@ static inline void atomic_set_mask(unsig
+
+ #endif /* !CONFIG_SMP */
+
++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
++#define atomic_inc_unchecked(v) atomic_inc((v))
+ #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+ #define atomic_dec_return(v) atomic_sub_return(1,(v))
+ #define atomic_inc_return(v) atomic_add_return(1,(v))
+diff -urNp linux-2.6.30.8/arch/blackfin/include/asm/kmap_types.h linux-2.6.30.8/arch/blackfin/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/blackfin/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/blackfin/include/asm/kmap_types.h 2009-07-30 09:48:09.883618875 -0400
+@@ -15,6 +15,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.30.8/arch/blackfin/mach-bf561/coreb.c linux-2.6.30.8/arch/blackfin/mach-bf561/coreb.c
+--- linux-2.6.30.8/arch/blackfin/mach-bf561/coreb.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/blackfin/mach-bf561/coreb.c 2009-07-30 09:48:09.883618875 -0400
+@@ -292,7 +292,7 @@ static int coreb_ioctl(struct inode *ino
+ return retval;
+ }
+
+-static struct file_operations coreb_fops = {
++static const struct file_operations coreb_fops = {
+ .owner = THIS_MODULE,
+ .llseek = coreb_lseek,
+ .read = coreb_read,
+diff -urNp linux-2.6.30.8/arch/cris/arch-v10/drivers/sync_serial.c linux-2.6.30.8/arch/cris/arch-v10/drivers/sync_serial.c
+--- linux-2.6.30.8/arch/cris/arch-v10/drivers/sync_serial.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/cris/arch-v10/drivers/sync_serial.c 2009-07-30 09:48:09.883618875 -0400
+@@ -244,7 +244,7 @@ static unsigned sync_serial_prescale_sha
+
+ #define NUMBER_OF_PORTS 2
+
+-static struct file_operations sync_serial_fops = {
++static const struct file_operations sync_serial_fops = {
+ .owner = THIS_MODULE,
+ .write = sync_serial_write,
+ .read = sync_serial_read,
+diff -urNp linux-2.6.30.8/arch/cris/arch-v32/drivers/mach-fs/gpio.c linux-2.6.30.8/arch/cris/arch-v32/drivers/mach-fs/gpio.c
+--- linux-2.6.30.8/arch/cris/arch-v32/drivers/mach-fs/gpio.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/cris/arch-v32/drivers/mach-fs/gpio.c 2009-07-30 12:06:52.081911892 -0400
+@@ -855,7 +855,7 @@ gpio_leds_ioctl(unsigned int cmd, unsign
+ return 0;
+ }
+
+-struct file_operations gpio_fops = {
++const struct file_operations gpio_fops = {
+ .owner = THIS_MODULE,
+ .poll = gpio_poll,
+ .ioctl = gpio_ioctl,
+diff -urNp linux-2.6.30.8/arch/cris/include/asm/atomic.h linux-2.6.30.8/arch/cris/include/asm/atomic.h
+--- linux-2.6.30.8/arch/cris/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/cris/include/asm/atomic.h 2009-07-30 09:48:09.884412595 -0400
+@@ -152,6 +152,10 @@ static inline int atomic_add_unless(atom
+ }
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
++#define atomic_inc_unchecked(v) atomic_inc((v))
++#define atomic_add_unchecked(i,v) atomic_add((i),(v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i),(v))
++
+ /* Atomic operations are already serializing */
+ #define smp_mb__before_atomic_dec() barrier()
+ #define smp_mb__after_atomic_dec() barrier()
+diff -urNp linux-2.6.30.8/arch/cris/include/asm/kmap_types.h linux-2.6.30.8/arch/cris/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/cris/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/cris/include/asm/kmap_types.h 2009-07-30 09:48:09.884412595 -0400
+@@ -19,6 +19,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.30.8/arch/frv/include/asm/atomic.h linux-2.6.30.8/arch/frv/include/asm/atomic.h
+--- linux-2.6.30.8/arch/frv/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/frv/include/asm/atomic.h 2009-07-30 09:48:09.885412202 -0400
+@@ -114,6 +114,10 @@ static inline void atomic_dec(atomic_t *
+ atomic_sub_return(1, v);
+ }
+
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#define atomic_add_unchecked(i,v) atomic_add((i),(v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i),(v))
++
+ #define atomic_dec_return(v) atomic_sub_return(1, (v))
+ #define atomic_inc_return(v) atomic_add_return(1, (v))
+
+diff -urNp linux-2.6.30.8/arch/frv/include/asm/kmap_types.h linux-2.6.30.8/arch/frv/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/frv/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/frv/include/asm/kmap_types.h 2009-07-30 09:48:09.885412202 -0400
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.30.8/arch/h8300/include/asm/atomic.h linux-2.6.30.8/arch/h8300/include/asm/atomic.h
+--- linux-2.6.30.8/arch/h8300/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/h8300/include/asm/atomic.h 2009-07-30 09:48:09.885412202 -0400
+@@ -26,6 +26,7 @@ static __inline__ int atomic_add_return(
+ }
+
+ #define atomic_add(i, v) atomic_add_return(i, v)
++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
+ #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+ static __inline__ int atomic_sub_return(int i, atomic_t *v)
+@@ -38,6 +39,7 @@ static __inline__ int atomic_sub_return(
+ }
+
+ #define atomic_sub(i, v) atomic_sub_return(i, v)
++#define atomic_sub_unchecked(i, v) atomic_sub(i, v)
+ #define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0)
+
+ static __inline__ int atomic_inc_return(atomic_t *v)
+@@ -51,6 +53,7 @@ static __inline__ int atomic_inc_return(
+ }
+
+ #define atomic_inc(v) atomic_inc_return(v)
++#define atomic_inc_unchecked(v) atomic_inc(v)
+
+ /*
+ * atomic_inc_and_test - increment and test
+diff -urNp linux-2.6.30.8/arch/h8300/include/asm/kmap_types.h linux-2.6.30.8/arch/h8300/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/h8300/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/h8300/include/asm/kmap_types.h 2009-07-30 09:48:09.885412202 -0400
+@@ -15,6 +15,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.30.8/arch/ia64/ia32/binfmt_elf32.c linux-2.6.30.8/arch/ia64/ia32/binfmt_elf32.c
+--- linux-2.6.30.8/arch/ia64/ia32/binfmt_elf32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/ia64/ia32/binfmt_elf32.c 2009-07-30 09:48:09.886522893 -0400
+@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
+
+ #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ /* Ugly but avoids duplication */
+ #include "../../../fs/binfmt_elf.c"
+
+@@ -69,11 +76,11 @@ ia32_install_gate_page (struct vm_area_s
+ }
+
+
+-static struct vm_operations_struct ia32_shared_page_vm_ops = {
++static const struct vm_operations_struct ia32_shared_page_vm_ops = {
+ .fault = ia32_install_shared_page
+ };
+
+-static struct vm_operations_struct ia32_gate_page_vm_ops = {
++static const struct vm_operations_struct ia32_gate_page_vm_ops = {
+ .fault = ia32_install_gate_page
+ };
+
+diff -urNp linux-2.6.30.8/arch/ia64/ia32/ia32priv.h linux-2.6.30.8/arch/ia64/ia32/ia32priv.h
+--- linux-2.6.30.8/arch/ia64/ia32/ia32priv.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/ia64/ia32/ia32priv.h 2009-07-30 09:48:09.886522893 -0400
+@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
+ #define ELF_DATA ELFDATA2LSB
+ #define ELF_ARCH EM_386
+
+-#define IA32_STACK_TOP IA32_PAGE_OFFSET
++#ifdef CONFIG_PAX_RANDUSTACK
++#define __IA32_DELTA_STACK (current->mm->delta_stack)
++#else
++#define __IA32_DELTA_STACK 0UL
++#endif
++
++#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
++
+ #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
+ #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
+
+diff -urNp linux-2.6.30.8/arch/ia64/include/asm/atomic.h linux-2.6.30.8/arch/ia64/include/asm/atomic.h
+--- linux-2.6.30.8/arch/ia64/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/ia64/include/asm/atomic.h 2009-07-30 09:48:09.886522893 -0400
+@@ -201,8 +201,11 @@ atomic64_add_negative (__s64 i, atomic64
+ #define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
+
+ #define atomic_add(i,v) atomic_add_return((i), (v))
++#define atomic_add_unchecked(i,v) atomic_add((i), (v))
+ #define atomic_sub(i,v) atomic_sub_return((i), (v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i), (v))
+ #define atomic_inc(v) atomic_add(1, (v))
++#define atomic_inc_unchecked(v) atomic_inc(v)
+ #define atomic_dec(v) atomic_sub(1, (v))
+
+ #define atomic64_add(i,v) atomic64_add_return((i), (v))
+diff -urNp linux-2.6.30.8/arch/ia64/include/asm/elf.h linux-2.6.30.8/arch/ia64/include/asm/elf.h
+--- linux-2.6.30.8/arch/ia64/include/asm/elf.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/ia64/include/asm/elf.h 2009-07-30 09:48:09.887468908 -0400
+@@ -43,6 +43,13 @@
+ */
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND 0x70000001
+
+ /* IA-64 relocations: */
+diff -urNp linux-2.6.30.8/arch/ia64/include/asm/kmap_types.h linux-2.6.30.8/arch/ia64/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/ia64/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/ia64/include/asm/kmap_types.h 2009-07-30 09:48:09.887468908 -0400
+@@ -22,7 +22,8 @@ D(9) KM_IRQ0,
+ D(10) KM_IRQ1,
+ D(11) KM_SOFTIRQ0,
+ D(12) KM_SOFTIRQ1,
+-D(13) KM_TYPE_NR
++D(13) KM_CLEARPAGE,
++D(14) KM_TYPE_NR
+ };
+
+ #undef D
+diff -urNp linux-2.6.30.8/arch/ia64/include/asm/pgtable.h linux-2.6.30.8/arch/ia64/include/asm/pgtable.h
+--- linux-2.6.30.8/arch/ia64/include/asm/pgtable.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/ia64/include/asm/pgtable.h 2009-07-30 09:48:09.887468908 -0400
+@@ -143,6 +143,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff -urNp linux-2.6.30.8/arch/ia64/include/asm/uaccess.h linux-2.6.30.8/arch/ia64/include/asm/uaccess.h
+--- linux-2.6.30.8/arch/ia64/include/asm/uaccess.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/ia64/include/asm/uaccess.h 2009-07-30 11:10:48.660249525 -0400
+@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
+ const void *__cu_from = (from); \
+ long __cu_len = (n); \
+ \
+- if (__access_ok(__cu_to, __cu_len, get_fs())) \
++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ __cu_len; \
+ })
+@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
+ long __cu_len = (n); \
+ \
+ __chk_user_ptr(__cu_from); \
+- if (__access_ok(__cu_from, __cu_len, get_fs())) \
++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
+ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+ __cu_len; \
+ })
+diff -urNp linux-2.6.30.8/arch/ia64/kernel/module.c linux-2.6.30.8/arch/ia64/kernel/module.c
+--- linux-2.6.30.8/arch/ia64/kernel/module.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/ia64/kernel/module.c 2009-07-30 09:48:09.888412729 -0400
+@@ -312,8 +312,7 @@ module_alloc (unsigned long size)
+ void
+ module_free (struct module *mod, void *module_region)
+ {
+- if (mod && mod->arch.init_unw_table &&
+- module_region == mod->module_init) {
++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
+ unw_remove_unwind_table(mod->arch.init_unw_table);
+ mod->arch.init_unw_table = NULL;
+ }
+@@ -499,15 +498,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
+ }
+
+ static inline int
++in_init_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
++}
++
++static inline int
++in_init_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
++}
++
++static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_init < mod->init_size;
++ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
++}
++
++static inline int
++in_core_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
++}
++
++static inline int
++in_core_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
+ }
+
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_core < mod->core_size;
++ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
+ }
+
+ static inline int
+@@ -690,7 +713,14 @@ do_reloc (struct module *mod, uint8_t r_
+ break;
+
+ case RV_BDREL:
+- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
++ if (in_init_rx(mod, val))
++ val -= (uint64_t) mod->module_init_rx;
++ else if (in_init_rw(mod, val))
++ val -= (uint64_t) mod->module_init_rw;
++ else if (in_core_rx(mod, val))
++ val -= (uint64_t) mod->module_core_rx;
++ else if (in_core_rw(mod, val))
++ val -= (uint64_t) mod->module_core_rw;
+ break;
+
+ case RV_LTV:
+@@ -824,15 +854,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
+ * addresses have been selected...
+ */
+ uint64_t gp;
+- if (mod->core_size > MAX_LTOFF)
++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
+ /*
+ * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+ * at the end of the module.
+ */
+- gp = mod->core_size - MAX_LTOFF / 2;
++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
+ else
+- gp = mod->core_size / 2;
+- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
++ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
+ mod->arch.gp = gp;
+ DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+ }
+diff -urNp linux-2.6.30.8/arch/ia64/kernel/sys_ia64.c linux-2.6.30.8/arch/ia64/kernel/sys_ia64.c
+--- linux-2.6.30.8/arch/ia64/kernel/sys_ia64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/ia64/kernel/sys_ia64.c 2009-07-30 09:48:09.888412729 -0400
+@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
+ if (REGION_NUMBER(addr) == RGN_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = mm->free_area_cache;
+
+@@ -61,9 +68,9 @@ arch_get_unmapped_area (struct file *fil
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
++ if (start_addr != mm->mmap_base) {
+ /* Start a new search --- just in case we missed some holes. */
+- addr = TASK_UNMAPPED_BASE;
++ addr = mm->mmap_base;
+ goto full_search;
+ }
+ return -ENOMEM;
+diff -urNp linux-2.6.30.8/arch/ia64/mm/fault.c linux-2.6.30.8/arch/ia64/mm/fault.c
+--- linux-2.6.30.8/arch/ia64/mm/fault.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/ia64/mm/fault.c 2009-07-30 09:48:09.889484146 -0400
+@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void __kprobes
+ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
+ {
+@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
+ mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
+
++ }
++
+ survive:
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff -urNp linux-2.6.30.8/arch/ia64/mm/init.c linux-2.6.30.8/arch/ia64/mm/init.c
+--- linux-2.6.30.8/arch/ia64/mm/init.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/ia64/mm/init.c 2009-07-30 09:48:09.889484146 -0400
+@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
+ vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
+ vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++ vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+diff -urNp linux-2.6.30.8/arch/m32r/include/asm/atomic.h linux-2.6.30.8/arch/m32r/include/asm/atomic.h
+--- linux-2.6.30.8/arch/m32r/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/m32r/include/asm/atomic.h 2009-07-30 09:48:09.889484146 -0400
+@@ -308,6 +308,10 @@ static __inline__ void atomic_set_mask(u
+ local_irq_restore(flags);
+ }
+
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#define atomic_add_unchecked(i,v) atomic_add((i),(v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i),(v))
++
+ /* Atomic operations are already serializing on m32r */
+ #define smp_mb__before_atomic_dec() barrier()
+ #define smp_mb__after_atomic_dec() barrier()
+diff -urNp linux-2.6.30.8/arch/m32r/include/asm/kmap_types.h linux-2.6.30.8/arch/m32r/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/m32r/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/m32r/include/asm/kmap_types.h 2009-07-30 09:48:09.890443797 -0400
+@@ -21,7 +21,8 @@ D(9) KM_IRQ0,
+ D(10) KM_IRQ1,
+ D(11) KM_SOFTIRQ0,
+ D(12) KM_SOFTIRQ1,
+-D(13) KM_TYPE_NR
++D(13) KM_CLEARPAGE,
++D(14) KM_TYPE_NR
+ };
+
+ #undef D
+diff -urNp linux-2.6.30.8/arch/m32r/lib/usercopy.c linux-2.6.30.8/arch/m32r/lib/usercopy.c
+--- linux-2.6.30.8/arch/m32r/lib/usercopy.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/m32r/lib/usercopy.c 2009-07-30 09:48:09.890443797 -0400
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetchw(to);
+ if (access_ok(VERIFY_READ, from, n))
+ __copy_user_zeroing(to,from,n);
+diff -urNp linux-2.6.30.8/arch/m68k/include/asm/atomic_mm.h linux-2.6.30.8/arch/m68k/include/asm/atomic_mm.h
+--- linux-2.6.30.8/arch/m68k/include/asm/atomic_mm.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/m68k/include/asm/atomic_mm.h 2009-07-30 09:48:09.890443797 -0400
+@@ -186,6 +186,10 @@ static __inline__ int atomic_add_unless(
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
++#define atomic_inc_unchecked(v) atomic_inc((v))
++#define atomic_add_unchecked(i,v) atomic_add((i),(v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i),(v))
++
+ /* Atomic operations are already serializing */
+ #define smp_mb__before_atomic_dec() barrier()
+ #define smp_mb__after_atomic_dec() barrier()
+diff -urNp linux-2.6.30.8/arch/m68k/include/asm/atomic_no.h linux-2.6.30.8/arch/m68k/include/asm/atomic_no.h
+--- linux-2.6.30.8/arch/m68k/include/asm/atomic_no.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/m68k/include/asm/atomic_no.h 2009-07-30 09:48:09.890443797 -0400
+@@ -151,5 +151,9 @@ static __inline__ int atomic_add_unless(
+ #define atomic_dec_return(v) atomic_sub_return(1,(v))
+ #define atomic_inc_return(v) atomic_add_return(1,(v))
+
++#define atomic_inc_unchecked(v) atomic_inc((v))
++#define atomic_add_unchecked(i,v) atomic_add((i),(v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i),(v))
++
+ #include <asm-generic/atomic.h>
+ #endif /* __ARCH_M68KNOMMU_ATOMIC __ */
+diff -urNp linux-2.6.30.8/arch/m68k/include/asm/kmap_types.h linux-2.6.30.8/arch/m68k/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/m68k/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/m68k/include/asm/kmap_types.h 2009-07-30 09:48:09.891413194 -0400
+@@ -15,6 +15,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.30.8/arch/mips/include/asm/atomic.h linux-2.6.30.8/arch/mips/include/asm/atomic.h
+--- linux-2.6.30.8/arch/mips/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mips/include/asm/atomic.h 2009-07-30 09:48:09.891413194 -0400
+@@ -381,6 +381,9 @@ static __inline__ int atomic_add_unless(
+ * Atomically increments @v by 1.
+ */
+ #define atomic_inc(v) atomic_add(1, (v))
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
+
+ /*
+ * atomic_dec - decrement and test
+diff -urNp linux-2.6.30.8/arch/mips/include/asm/elf.h linux-2.6.30.8/arch/mips/include/asm/elf.h
+--- linux-2.6.30.8/arch/mips/include/asm/elf.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mips/include/asm/elf.h 2009-07-30 09:48:09.891413194 -0400
+@@ -364,4 +364,11 @@ extern int dump_task_fpu(struct task_str
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #endif /* _ASM_ELF_H */
+diff -urNp linux-2.6.30.8/arch/mips/include/asm/kmap_types.h linux-2.6.30.8/arch/mips/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/mips/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mips/include/asm/kmap_types.h 2009-07-30 09:48:09.892412592 -0400
+@@ -22,7 +22,8 @@ D(9) KM_IRQ0,
+ D(10) KM_IRQ1,
+ D(11) KM_SOFTIRQ0,
+ D(12) KM_SOFTIRQ1,
+-D(13) KM_TYPE_NR
++D(13) KM_CLEARPAGE,
++D(14) KM_TYPE_NR
+ };
+
+ #undef D
+diff -urNp linux-2.6.30.8/arch/mips/include/asm/page.h linux-2.6.30.8/arch/mips/include/asm/page.h
+--- linux-2.6.30.8/arch/mips/include/asm/page.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mips/include/asm/page.h 2009-07-30 09:48:09.892412592 -0400
+@@ -85,7 +85,7 @@ extern void copy_user_highpage(struct pa
+ #ifdef CONFIG_CPU_MIPS32
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+ #else
+ typedef struct { unsigned long long pte; } pte_t;
+ #define pte_val(x) ((x).pte)
+diff -urNp linux-2.6.30.8/arch/mips/include/asm/system.h linux-2.6.30.8/arch/mips/include/asm/system.h
+--- linux-2.6.30.8/arch/mips/include/asm/system.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mips/include/asm/system.h 2009-07-30 09:48:09.892412592 -0400
+@@ -217,6 +217,6 @@ extern void per_cpu_trap_init(void);
+ */
+ #define __ARCH_WANT_UNLOCKED_CTXSW
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ALMASK)
+
+ #endif /* _ASM_SYSTEM_H */
+diff -urNp linux-2.6.30.8/arch/mips/kernel/binfmt_elfn32.c linux-2.6.30.8/arch/mips/kernel/binfmt_elfn32.c
+--- linux-2.6.30.8/arch/mips/kernel/binfmt_elfn32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mips/kernel/binfmt_elfn32.c 2009-07-30 09:48:09.892412592 -0400
+@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/elfcore.h>
+diff -urNp linux-2.6.30.8/arch/mips/kernel/binfmt_elfo32.c linux-2.6.30.8/arch/mips/kernel/binfmt_elfo32.c
+--- linux-2.6.30.8/arch/mips/kernel/binfmt_elfo32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mips/kernel/binfmt_elfo32.c 2009-07-30 09:48:09.893444022 -0400
+@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/elfcore.h>
+diff -urNp linux-2.6.30.8/arch/mips/kernel/process.c linux-2.6.30.8/arch/mips/kernel/process.c
+--- linux-2.6.30.8/arch/mips/kernel/process.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mips/kernel/process.c 2009-07-30 09:48:09.893444022 -0400
+@@ -457,15 +457,3 @@ unsigned long get_wchan(struct task_stru
+ out:
+ return pc;
+ }
+-
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+-
+- return sp & ALMASK;
+-}
+diff -urNp linux-2.6.30.8/arch/mips/kernel/syscall.c linux-2.6.30.8/arch/mips/kernel/syscall.c
+--- linux-2.6.30.8/arch/mips/kernel/syscall.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mips/kernel/syscall.c 2009-07-30 09:48:09.893444022 -0400
+@@ -99,6 +99,11 @@ unsigned long arch_get_unmapped_area(str
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -109,7 +114,7 @@ unsigned long arch_get_unmapped_area(str
+ (!vmm || addr + len <= vmm->vm_start))
+ return addr;
+ }
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+diff -urNp linux-2.6.30.8/arch/mips/mm/fault.c linux-2.6.30.8/arch/mips/mm/fault.c
+--- linux-2.6.30.8/arch/mips/mm/fault.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mips/mm/fault.c 2009-07-30 09:48:09.896533953 -0400
+@@ -26,6 +26,23 @@
+ #include <asm/ptrace.h>
+ #include <asm/highmem.h> /* For VMALLOC_END */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+diff -urNp linux-2.6.30.8/arch/mn10300/include/asm/atomic.h linux-2.6.30.8/arch/mn10300/include/asm/atomic.h
+--- linux-2.6.30.8/arch/mn10300/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mn10300/include/asm/atomic.h 2009-07-30 09:48:09.897612189 -0400
+@@ -145,6 +145,10 @@ static inline void atomic_clear_mask(uns
+ #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
+ #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#define atomic_add_unchecked(i,v) atomic_add((i),(v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i),(v))
++
+ /* Atomic operations are already serializing on MN10300??? */
+ #define smp_mb__before_atomic_dec() barrier()
+ #define smp_mb__after_atomic_dec() barrier()
+diff -urNp linux-2.6.30.8/arch/mn10300/include/asm/kmap_types.h linux-2.6.30.8/arch/mn10300/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/mn10300/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mn10300/include/asm/kmap_types.h 2009-07-30 09:48:09.897612189 -0400
+@@ -25,6 +25,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.30.8/arch/mn10300/kernel/setup.c linux-2.6.30.8/arch/mn10300/kernel/setup.c
+--- linux-2.6.30.8/arch/mn10300/kernel/setup.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/mn10300/kernel/setup.c 2009-07-30 09:48:09.897612189 -0400
+@@ -285,7 +285,7 @@ static void c_stop(struct seq_file *m, v
+ {
+ }
+
+-struct seq_operations cpuinfo_op = {
++const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+diff -urNp linux-2.6.30.8/arch/parisc/include/asm/atomic.h linux-2.6.30.8/arch/parisc/include/asm/atomic.h
+--- linux-2.6.30.8/arch/parisc/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/parisc/include/asm/atomic.h 2009-07-30 09:48:09.898625493 -0400
+@@ -223,8 +223,11 @@ static __inline__ int atomic_add_unless(
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+ #define atomic_add(i,v) ((void)(__atomic_add_return( ((int)(i)),(v))))
++#define atomic_add_unchecked(i,v) atomic_add((i), (v))
+ #define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)(i)),(v))))
++#define atomic_sub_unchecked(i,v) atomic_sub((i), (v))
+ #define atomic_inc(v) ((void)(__atomic_add_return( 1,(v))))
++#define atomic_inc_unchecked(v) atomic_inc(v)
+ #define atomic_dec(v) ((void)(__atomic_add_return( -1,(v))))
+
+ #define atomic_add_return(i,v) (__atomic_add_return( ((int)(i)),(v)))
+diff -urNp linux-2.6.30.8/arch/parisc/include/asm/elf.h linux-2.6.30.8/arch/parisc/include/asm/elf.h
+--- linux-2.6.30.8/arch/parisc/include/asm/elf.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/parisc/include/asm/elf.h 2009-07-30 09:48:09.898625493 -0400
+@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff -urNp linux-2.6.30.8/arch/parisc/include/asm/kmap_types.h linux-2.6.30.8/arch/parisc/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/parisc/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/parisc/include/asm/kmap_types.h 2009-07-30 09:48:09.898625493 -0400
+@@ -22,7 +22,8 @@ D(9) KM_IRQ0,
+ D(10) KM_IRQ1,
+ D(11) KM_SOFTIRQ0,
+ D(12) KM_SOFTIRQ1,
+-D(13) KM_TYPE_NR
++D(13) KM_CLEARPAGE,
++D(14) KM_TYPE_NR
+ };
+
+ #undef D
+diff -urNp linux-2.6.30.8/arch/parisc/include/asm/pgtable.h linux-2.6.30.8/arch/parisc/include/asm/pgtable.h
+--- linux-2.6.30.8/arch/parisc/include/asm/pgtable.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/parisc/include/asm/pgtable.h 2009-07-30 09:48:09.898625493 -0400
+@@ -207,6 +207,17 @@
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
+ #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+diff -urNp linux-2.6.30.8/arch/parisc/kernel/module.c linux-2.6.30.8/arch/parisc/kernel/module.c
+--- linux-2.6.30.8/arch/parisc/kernel/module.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/parisc/kernel/module.c 2009-07-30 09:48:09.899565255 -0400
+@@ -91,16 +91,38 @@
+
+ /* three functions to determine where in the module core
+ * or init pieces the location is */
++static inline int in_init_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rx &&
++ loc < (me->module_init_rx + me->init_size_rx));
++}
++
++static inline int in_init_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rw &&
++ loc < (me->module_init_rw + me->init_size_rw));
++}
++
+ static inline int in_init(struct module *me, void *loc)
+ {
+- return (loc >= me->module_init &&
+- loc <= (me->module_init + me->init_size));
++ return in_init_rx(me, loc) || in_init_rw(me, loc);
++}
++
++static inline int in_core_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rx &&
++ loc < (me->module_core_rx + me->core_size_rx));
++}
++
++static inline int in_core_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rw &&
++ loc < (me->module_core_rw + me->core_size_rw));
+ }
+
+ static inline int in_core(struct module *me, void *loc)
+ {
+- return (loc >= me->module_core &&
+- loc <= (me->module_core + me->core_size));
++ return in_core_rx(me, loc) || in_core_rw(me, loc);
+ }
+
+ static inline int in_local(struct module *me, void *loc)
+@@ -334,13 +356,13 @@ int module_frob_arch_sections(CONST Elf_
+ }
+
+ /* align things a bit */
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.got_offset = me->core_size;
+- me->core_size += gots * sizeof(struct got_entry);
+-
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.fdesc_offset = me->core_size;
+- me->core_size += fdescs * sizeof(Elf_Fdesc);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += gots * sizeof(struct got_entry);
++
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.fdesc_offset = me->core_size_rw;
++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+@@ -358,7 +380,7 @@ static Elf64_Word get_got(struct module
+
+ BUG_ON(value == 0);
+
+- got = me->module_core + me->arch.got_offset;
++ got = me->module_core_rw + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+@@ -376,7 +398,7 @@ static Elf64_Word get_got(struct module
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -394,7 +416,7 @@ static Elf_Addr get_fdesc(struct module
+
+ /* Create new one */
+ fdesc->addr = value;
+- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -810,7 +832,7 @@ register_unwind_table(struct module *me,
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
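Besides splitting the single core/init regions into rx/rw pairs, the rewritten membership helpers switch the upper bound from an inclusive "<=" to an end-exclusive "<", closing an off-by-one that accepted the first address past a region. A standalone sketch of the corrected test; the helper name is hypothetical:

    #include <stdbool.h>

    /* Membership in the half-open interval [base, base + size), as the
     * patched in_init_rx()/in_core_rx() helpers compute it. */
    static bool in_range(const void *loc, const void *base, unsigned long size)
    {
        return loc >= base &&
               (const char *)loc < (const char *)base + size;
    }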
+diff -urNp linux-2.6.30.8/arch/parisc/kernel/sys_parisc.c linux-2.6.30.8/arch/parisc/kernel/sys_parisc.c
+--- linux-2.6.30.8/arch/parisc/kernel/sys_parisc.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/parisc/kernel/sys_parisc.c 2009-07-30 09:48:09.899565255 -0400
+@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
+ if (flags & MAP_FIXED)
+ return addr;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (filp) {
+ addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
+diff -urNp linux-2.6.30.8/arch/parisc/kernel/traps.c linux-2.6.30.8/arch/parisc/kernel/traps.c
+--- linux-2.6.30.8/arch/parisc/kernel/traps.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/parisc/kernel/traps.c 2009-07-30 09:48:09.900676754 -0400
+@@ -734,9 +734,7 @@ void notrace handle_interruption(int cod
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff -urNp linux-2.6.30.8/arch/parisc/mm/fault.c linux-2.6.30.8/arch/parisc/mm/fault.c
+--- linux-2.6.30.8/arch/parisc/mm/fault.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/parisc/mm/fault.c 2009-07-30 09:48:09.900676754 -0400
+@@ -16,6 +16,7 @@
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+@@ -53,7 +54,7 @@ DEFINE_PER_CPU(struct exception_data, ex
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -139,6 +140,116 @@ parisc_acctyp(unsigned long code, unsign
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int *)addr);
++ err |= get_user(bv, (unsigned int *)(addr+4));
++ err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fix;
+@@ -193,8 +304,33 @@ good_area:
+
+ acc_type = parisc_acctyp(code,regs->iir);
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff -urNp linux-2.6.30.8/arch/powerpc/include/asm/atomic.h linux-2.6.30.8/arch/powerpc/include/asm/atomic.h
+--- linux-2.6.30.8/arch/powerpc/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/include/asm/atomic.h 2009-07-30 09:48:09.900676754 -0400
+@@ -244,6 +244,10 @@ static __inline__ int atomic_dec_if_posi
+ return t;
+ }
+
++#define atomic_inc_unchecked(v) atomic_inc((v))
++#define atomic_add_unchecked(i,v) atomic_add((i),(v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i),(v))
++
+ #define smp_mb__before_atomic_dec() smp_mb()
+ #define smp_mb__after_atomic_dec() smp_mb()
+ #define smp_mb__before_atomic_inc() smp_mb()
+diff -urNp linux-2.6.30.8/arch/powerpc/include/asm/elf.h linux-2.6.30.8/arch/powerpc/include/asm/elf.h
+--- linux-2.6.30.8/arch/powerpc/include/asm/elf.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/include/asm/elf.h 2009-07-30 09:48:09.901536944 -0400
+@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
++#define ELF_ET_DYN_BASE (0x20000000)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
++#endif
+
+ /*
+ * Our registers are always unsigned longs, whether we're a 32 bit
+diff -urNp linux-2.6.30.8/arch/powerpc/include/asm/kmap_types.h linux-2.6.30.8/arch/powerpc/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/powerpc/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/include/asm/kmap_types.h 2009-07-30 09:48:09.901536944 -0400
+@@ -26,6 +26,7 @@ enum km_type {
+ KM_SOFTIRQ1,
+ KM_PPC_SYNC_PAGE,
+ KM_PPC_SYNC_ICACHE,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.30.8/arch/powerpc/include/asm/page_64.h linux-2.6.30.8/arch/powerpc/include/asm/page_64.h
+--- linux-2.6.30.8/arch/powerpc/include/asm/page_64.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/include/asm/page_64.h 2009-07-30 09:48:09.902599231 -0400
+@@ -170,15 +170,18 @@ do { \
+ * stack by default, so in the absence of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_32BIT) ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+
+ #include <asm-generic/page.h>
+
+diff -urNp linux-2.6.30.8/arch/powerpc/include/asm/page.h linux-2.6.30.8/arch/powerpc/include/asm/page.h
+--- linux-2.6.30.8/arch/powerpc/include/asm/page.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/include/asm/page.h 2009-07-30 09:48:09.902599231 -0400
+@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+diff -urNp linux-2.6.30.8/arch/powerpc/include/asm/uaccess.h linux-2.6.30.8/arch/powerpc/include/asm/uaccess.h
+--- linux-2.6.30.8/arch/powerpc/include/asm/uaccess.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/include/asm/uaccess.h 2009-07-30 11:10:48.774534063 -0400
+@@ -334,6 +334,9 @@ static inline unsigned long copy_from_us
+ {
+ unsigned long over;
+
++ if (((long)n < 0) || (n > INT_MAX))
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ if ((unsigned long)from < TASK_SIZE) {
+@@ -349,6 +352,9 @@ static inline unsigned long copy_to_user
+ {
+ unsigned long over;
+
++ if (((long)n < 0) || (n > INT_MAX))
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __copy_tofrom_user(to, (__force void __user *)from, n);
+ if ((unsigned long)to < TASK_SIZE) {
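The added guard rejects sizes that are negative when viewed as a signed long (typically the result of an unsigned underflow in a caller) and caps copies at INT_MAX, returning n so the caller sees a full-length failure. A runnable userspace illustration of what the ((long)n < 0) test catches; the scenario is hypothetical:

    #include <stdio.h>

    int main(void)
    {
        unsigned long n = 0;

        n -= 1;  /* underflow: n is now ULONG_MAX */
        printf("as long: %ld -> %s\n", (long)n,
               ((long)n < 0) ? "rejected" : "allowed");
        return 0;
    }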
+diff -urNp linux-2.6.30.8/arch/powerpc/kernel/module_32.c linux-2.6.30.8/arch/powerpc/kernel/module_32.c
+--- linux-2.6.30.8/arch/powerpc/kernel/module_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/kernel/module_32.c 2009-07-30 09:48:09.903567873 -0400
+@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
+ me->arch.core_plt_section = i;
+ }
+ if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+- printk("Module doesn't contain .plt or .init.plt sections.\n");
++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+ return -ENOEXEC;
+ }
+
+@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
+
+ DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ /* Init, or core PLT? */
+- if (location >= mod->module_core
+- && location < mod->module_core + mod->core_size)
++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
+ entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+- else
++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
+ entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++ else {
++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++ return ~0UL;
++ }
+
+ /* Find this entry, or if that fails, the next avail. entry */
+ while (entry->jump[0]) {
+diff -urNp linux-2.6.30.8/arch/powerpc/kernel/process.c linux-2.6.30.8/arch/powerpc/kernel/process.c
+--- linux-2.6.30.8/arch/powerpc/kernel/process.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/kernel/process.c 2009-07-30 09:48:09.903567873 -0400
+@@ -1147,36 +1147,3 @@ unsigned long arch_align_stack(unsigned
+ sp -= get_random_int() & ~PAGE_MASK;
+ return sp & ~0xf;
+ }
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = 0;
+-
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+- else
+- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+-
+- return rnd << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < base)
+- return base;
+-
+- return ret;
+-}
+diff -urNp linux-2.6.30.8/arch/powerpc/kernel/setup-common.c linux-2.6.30.8/arch/powerpc/kernel/setup-common.c
+--- linux-2.6.30.8/arch/powerpc/kernel/setup-common.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/kernel/setup-common.c 2009-07-30 09:48:09.903567873 -0400
+@@ -328,7 +328,7 @@ static void c_stop(struct seq_file *m, v
+ {
+ }
+
+-struct seq_operations cpuinfo_op = {
++const struct seq_operations cpuinfo_op = {
+ .start =c_start,
+ .next = c_next,
+ .stop = c_stop,
+diff -urNp linux-2.6.30.8/arch/powerpc/kernel/signal_32.c linux-2.6.30.8/arch/powerpc/kernel/signal_32.c
+--- linux-2.6.30.8/arch/powerpc/kernel/signal_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/kernel/signal_32.c 2009-07-30 09:48:09.903567873 -0400
+@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
+ /* Save user registers on the stack */
+ frame = &rt_sf->uc.uc_mcontext;
+ addr = frame;
+- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ if (save_user_regs(regs, frame, 0, 1))
+ goto badframe;
+ regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+diff -urNp linux-2.6.30.8/arch/powerpc/kernel/signal_64.c linux-2.6.30.8/arch/powerpc/kernel/signal_64.c
+--- linux-2.6.30.8/arch/powerpc/kernel/signal_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/kernel/signal_64.c 2009-07-30 09:48:09.905069777 -0400
+@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
+ current->thread.fpscr.val = 0;
+
+ /* Set up to return from userspace. */
+- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+ } else {
+ err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff -urNp linux-2.6.30.8/arch/powerpc/kernel/vdso.c linux-2.6.30.8/arch/powerpc/kernel/vdso.c
+--- linux-2.6.30.8/arch/powerpc/kernel/vdso.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/kernel/vdso.c 2009-07-30 09:48:09.905069777 -0400
+@@ -211,7 +211,7 @@ int arch_setup_additional_pages(struct l
+ vdso_base = VDSO32_MBASE;
+ #endif
+
+- current->mm->context.vdso_base = 0;
++ current->mm->context.vdso_base = ~0UL;
+
+ /* vDSO has a problem and was disabled, just don't "enable" it for the
+ * process
+@@ -228,7 +228,7 @@ int arch_setup_additional_pages(struct l
+ */
+ down_write(&mm->mmap_sem);
+ vdso_base = get_unmapped_area(NULL, vdso_base,
+- vdso_pages << PAGE_SHIFT, 0, 0);
++ vdso_pages << PAGE_SHIFT, 0, MAP_PRIVATE | MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
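Two related changes here: the "no vDSO" sentinel moves from 0 to ~0UL, since a randomized mmap base makes 0 a theoretically mappable address while ~0UL can never start a page-aligned mapping, and the get_unmapped_area() call passes MAP_PRIVATE | MAP_EXECUTABLE instead of 0. The signal_32.c/signal_64.c hunks above test the sentinel explicitly; a hypothetical helper capturing that test:

    /* Hypothetical helper; the patch open-codes this comparison. */
    static inline int vdso_mapped(unsigned long vdso_base)
    {
        return vdso_base != ~0UL;  /* ~0UL means "vDSO not mapped" */
    }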
+diff -urNp linux-2.6.30.8/arch/powerpc/kvm/timing.c linux-2.6.30.8/arch/powerpc/kvm/timing.c
+--- linux-2.6.30.8/arch/powerpc/kvm/timing.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/kvm/timing.c 2009-07-30 09:48:09.905069777 -0400
+@@ -201,7 +201,7 @@ static int kvmppc_exit_timing_open(struc
+ return single_open(file, kvmppc_exit_timing_show, inode->i_private);
+ }
+
+-static struct file_operations kvmppc_exit_timing_fops = {
++static const struct file_operations kvmppc_exit_timing_fops = {
+ .owner = THIS_MODULE,
+ .open = kvmppc_exit_timing_open,
+ .read = seq_read,
+diff -urNp linux-2.6.30.8/arch/powerpc/lib/usercopy_64.c linux-2.6.30.8/arch/powerpc/lib/usercopy_64.c
+--- linux-2.6.30.8/arch/powerpc/lib/usercopy_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/lib/usercopy_64.c 2009-07-30 11:10:48.798471204 -0400
+@@ -11,6 +11,9 @@
+
+ unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if (unlikely(((long)n < 0) || (n > INT_MAX)))
++ return n;
++
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ n = __copy_from_user(to, from, n);
+ else
+@@ -20,6 +23,9 @@ unsigned long copy_from_user(void *to, c
+
+ unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if (unlikely(((long)n < 0) || (n > INT_MAX)))
++ return n;
++
+ if (likely(access_ok(VERIFY_WRITE, to, n)))
+ n = __copy_to_user(to, from, n);
+ return n;
+diff -urNp linux-2.6.30.8/arch/powerpc/mm/fault.c linux-2.6.30.8/arch/powerpc/mm/fault.c
+--- linux-2.6.30.8/arch/powerpc/mm/fault.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/mm/fault.c 2009-07-30 09:48:09.905534132 -0400
+@@ -29,6 +29,10 @@
+ #include <linux/module.h>
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -63,6 +67,363 @@ static inline int notify_page_fault(stru
+ }
+ #endif
+
++#ifdef CONFIG_PAX_EMUSIGRT
++void pax_syscall_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_syscall = 0UL;
++}
++
++static int pax_syscall_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x44000002U; /* sc */
++ __flush_dcache_icache(kaddr);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_syscall_close,
++ .fault = pax_syscall_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched GOT trampoline was detected
++ * 3 when patched PLT trampoline was detected
++ * 4 when unpatched PLT trampoline was detected
++ * 5 when sigreturn trampoline was detected
++ * 6 when rt_sigreturn trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#if defined(CONFIG_PAX_EMUPLT) || defined(CONFIG_PAX_EMUSIGRT)
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched GOT emulation */
++ unsigned int blrl;
++
++ err = get_user(blrl, (unsigned int *)regs->nip);
++
++ if (!err && blrl == 0x4E800021U) {
++ unsigned long temp = regs->nip;
++
++ regs->nip = regs->link & 0xFFFFFFFCUL;
++ regs->link = temp + 4UL;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int b;
++
++ err = get_user(b, (unsigned int *)regs->nip);
++
++ if (!err && (b & 0xFC000003U) == 0x48000000U) {
++ regs->nip += (((b | 0xFC000000UL) ^ 0x02000000UL) + 0x02000000UL);
++ return 3;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation #1 */
++ unsigned int li, b;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(b, (unsigned int *)(regs->nip+4));
++
++ if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
++ unsigned int rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
++ unsigned long addr = b | 0xFC000000UL;
++
++ addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
++ err = get_user(rlwinm, (unsigned int *)addr);
++ err |= get_user(add, (unsigned int *)(addr+4));
++ err |= get_user(li2, (unsigned int *)(addr+8));
++ err |= get_user(addis2, (unsigned int *)(addr+12));
++ err |= get_user(mtctr, (unsigned int *)(addr+16));
++ err |= get_user(li3, (unsigned int *)(addr+20));
++ err |= get_user(addis3, (unsigned int *)(addr+24));
++ err |= get_user(bctr, (unsigned int *)(addr+28));
++
++ if (err)
++ break;
++
++ if (rlwinm == 0x556C083CU &&
++ add == 0x7D6C5A14U &&
++ (li2 & 0xFFFF0000U) == 0x39800000U &&
++ (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
++ mtctr == 0x7D8903A6U &&
++ (li3 & 0xFFFF0000U) == 0x39800000U &&
++ (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
++ bctr == 0x4E800420U)
++ {
++ regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
++ regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->ctr += (addis2 & 0xFFFFU) << 16;
++ regs->nip = regs->ctr;
++ return 4;
++ }
++ }
++ } while (0);
++
++#if 0
++ do { /* PaX: unpatched PLT emulation #2 */
++ unsigned int lis, lwzu, b, bctr;
++
++ err = get_user(lis, (unsigned int *)regs->nip);
++ err |= get_user(lwzu, (unsigned int *)(regs->nip+4));
++ err |= get_user(b, (unsigned int *)(regs->nip+8));
++ err |= get_user(bctr, (unsigned int *)(regs->nip+12));
++
++ if (err)
++ break;
++
++ if ((lis & 0xFFFF0000U) == 0x39600000U &&
++ (lwzu & 0xU) == 0xU &&
++ (b & 0xFC000003U) == 0x48000000U &&
++ bctr == 0x4E800420U)
++ {
++ unsigned int addis, addi, rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
++ unsigned long addr = b | 0xFC000000UL;
++
++ addr = regs->nip + 12 + ((addr ^ 0x02000000UL) + 0x02000000UL);
++ err = get_user(addis, (unsigned int *)addr);
++ err |= get_user(addi, (unsigned int *)(addr+4));
++ err |= get_user(rlwinm, (unsigned int *)(addr+8));
++ err |= get_user(add, (unsigned int *)(addr+12));
++ err |= get_user(li2, (unsigned int *)(addr+16));
++ err |= get_user(addis2, (unsigned int *)(addr+20));
++ err |= get_user(mtctr, (unsigned int *)(addr+24));
++ err |= get_user(li3, (unsigned int *)(addr+28));
++ err |= get_user(addis3, (unsigned int *)(addr+32));
++ err |= get_user(bctr, (unsigned int *)(addr+36));
++
++ if (err)
++ break;
++
++ if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
++ (addi & 0xFFFF0000U) == 0x396B0000U &&
++ rlwinm == 0x556C083CU &&
++ add == 0x7D6C5A14U &&
++ (li2 & 0xFFFF0000U) == 0x39800000U &&
++ (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
++ mtctr == 0x7D8903A6U &&
++ (li3 & 0xFFFF0000U) == 0x39800000U &&
++ (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
++ bctr == 0x4E800420U)
++ {
++ regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
++ regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->ctr += (addis2 & 0xFFFFU) << 16;
++ regs->nip = regs->ctr;
++ return 4;
++ }
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: unpatched PLT emulation #3 */
++ unsigned int li, b;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(b, (unsigned int *)(regs->nip+4));
++
++ if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
++ unsigned int addis, lwz, mtctr, bctr;
++ unsigned long addr = b | 0xFC000000UL;
++
++ addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
++ err = get_user(addis, (unsigned int *)addr);
++ err |= get_user(lwz, (unsigned int *)(addr+4));
++ err |= get_user(mtctr, (unsigned int *)(addr+8));
++ err |= get_user(bctr, (unsigned int *)(addr+12));
++
++ if (err)
++ break;
++
++ if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
++ (lwz & 0xFFFF0000U) == 0x816B0000U &&
++ mtctr == 0x7D6903A6U &&
++ bctr == 0x4E800420U)
++ {
++ unsigned int r11;
++
++ addr = (addis << 16) + (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ addr += (((lwz | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++
++ err = get_user(r11, (unsigned int *)addr);
++ if (err)
++ break;
++
++ regs->gpr[PT_R11] = r11;
++ regs->ctr = r11;
++ regs->nip = r11;
++ return 4;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUSIGRT
++ do { /* PaX: sigreturn emulation */
++ unsigned int li, sc;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(sc, (unsigned int *)(regs->nip+4));
++
++ if (!err && li == 0x38000000U + __NR_sigreturn && sc == 0x44000002U) {
++ struct vm_area_struct *vma;
++ unsigned long call_syscall;
++
++ down_read(&current->mm->mmap_sem);
++ call_syscall = current->mm->call_syscall;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_syscall))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_syscall) {
++ call_syscall = current->mm->call_syscall;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_syscall & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_syscall)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_syscall = call_syscall;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->gpr[PT_R0] = __NR_sigreturn;
++ regs->nip = call_syscall;
++ return 5;
++ }
++ } while (0);
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int li, sc;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(sc, (unsigned int *)(regs->nip+4));
++
++ if (!err && li == 0x38000000U + __NR_rt_sigreturn && sc == 0x44000002U) {
++ struct vm_area_struct *vma;
++ unsigned int call_syscall;
++
++ down_read(&current->mm->mmap_sem);
++ call_syscall = current->mm->call_syscall;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_syscall))
++ goto rt_emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_syscall) {
++ call_syscall = current->mm->call_syscall;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto rt_emulate;
++ }
++
++ call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_syscall & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_syscall)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_syscall = call_syscall;
++ up_write(&current->mm->mmap_sem);
++
++rt_emulate:
++ regs->gpr[PT_R0] = __NR_rt_sigreturn;
++ regs->nip = call_syscall;
++ return 6;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -133,7 +494,7 @@ int __kprobes do_page_fault(struct pt_re
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (trap == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -327,6 +688,37 @@ bad_area:
+ bad_area_nosemaphore:
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC64
++ if (is_exec && (error_code & DSISR_PROTFAULT)) {
++#else
++ if (is_exec && regs->nip == address) {
++#endif
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ case 4:
++ return 0;
++#endif
++
++#ifdef CONFIG_PAX_EMUSIGRT
++ case 5:
++ case 6:
++ return 0;
++#endif
++
++ }
++
++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ _exception(SIGSEGV, regs, code, address);
+ return 0;
+ }
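The EMUSIGRT paths above follow a check-lock-recheck pattern on mm->call_syscall: read it under mmap_sem held for reading, and only if it is still unset, allocate a vma speculatively, retake the semaphore for writing, recheck, and free the allocation if another thread won the race. A pthread-based analogue of the same pattern; all names and the 0x1000 stand-in value are hypothetical:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t lk = PTHREAD_RWLOCK_INITIALIZER;
    static unsigned long resource;  /* stands in for mm->call_syscall */

    static unsigned long get_or_create(void)
    {
        unsigned long v;

        pthread_rwlock_rdlock(&lk);
        v = resource;
        pthread_rwlock_unlock(&lk);
        if (v)
            return v;               /* fast path: already set up */

        pthread_rwlock_wrlock(&lk);
        if (!resource)              /* recheck: another thread may have won */
            resource = 0x1000;      /* stand-in for the real setup work */
        v = resource;
        pthread_rwlock_unlock(&lk);
        return v;
    }

    int main(void)
    {
        printf("%#lx\n", get_or_create());
        return 0;
    }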
+diff -urNp linux-2.6.30.8/arch/powerpc/mm/mmap_64.c linux-2.6.30.8/arch/powerpc/mm/mmap_64.c
+--- linux-2.6.30.8/arch/powerpc/mm/mmap_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/mm/mmap_64.c 2009-07-30 09:48:09.905534132 -0400
+@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
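Under RANDMMAP the legacy (bottom-up) layout shifts its base up by delta_mmap, while the top-down layout shifts down by delta_mmap + delta_stack so the randomized stack gap cannot collide with the mmap region. A runnable sketch of the arithmetic; every value below is made up for illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long task_unmapped_base = 0x40000000UL;
        unsigned long topdown_base = 0xbff00000UL;
        unsigned long delta_mmap = 0x00a00000UL;   /* example entropy */
        unsigned long delta_stack = 0x00100000UL;

        printf("legacy:   %#lx\n", task_unmapped_base + delta_mmap);
        printf("top-down: %#lx\n",
               topdown_base - (delta_mmap + delta_stack));
        return 0;
    }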
+diff -urNp linux-2.6.30.8/arch/powerpc/platforms/cell/spufs/file.c linux-2.6.30.8/arch/powerpc/platforms/cell/spufs/file.c
+--- linux-2.6.30.8/arch/powerpc/platforms/cell/spufs/file.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/platforms/cell/spufs/file.c 2009-07-30 09:48:09.906622773 -0400
+@@ -147,7 +147,7 @@ static int __fops ## _open(struct inode
+ __simple_attr_check_format(__fmt, 0ull); \
+ return spufs_attr_open(inode, file, __get, __set, __fmt); \
+ } \
+-static struct file_operations __fops = { \
++static const struct file_operations __fops = { \
+ .owner = THIS_MODULE, \
+ .open = __fops ## _open, \
+ .release = spufs_attr_release, \
+@@ -309,7 +309,7 @@ static int spufs_mem_mmap_access(struct
+ return len;
+ }
+
+-static struct vm_operations_struct spufs_mem_mmap_vmops = {
++static const struct vm_operations_struct spufs_mem_mmap_vmops = {
+ .fault = spufs_mem_mmap_fault,
+ .access = spufs_mem_mmap_access,
+ };
+@@ -436,7 +436,7 @@ static int spufs_cntl_mmap_fault(struct
+ return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
+ }
+
+-static struct vm_operations_struct spufs_cntl_mmap_vmops = {
++static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
+ .fault = spufs_cntl_mmap_fault,
+ };
+
+@@ -1143,7 +1143,7 @@ spufs_signal1_mmap_fault(struct vm_area_
+ #endif
+ }
+
+-static struct vm_operations_struct spufs_signal1_mmap_vmops = {
++static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
+ .fault = spufs_signal1_mmap_fault,
+ };
+
+@@ -1279,7 +1279,7 @@ spufs_signal2_mmap_fault(struct vm_area_
+ #endif
+ }
+
+-static struct vm_operations_struct spufs_signal2_mmap_vmops = {
++static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
+ .fault = spufs_signal2_mmap_fault,
+ };
+
+@@ -1397,7 +1397,7 @@ spufs_mss_mmap_fault(struct vm_area_stru
+ return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
+ }
+
+-static struct vm_operations_struct spufs_mss_mmap_vmops = {
++static const struct vm_operations_struct spufs_mss_mmap_vmops = {
+ .fault = spufs_mss_mmap_fault,
+ };
+
+@@ -1458,7 +1458,7 @@ spufs_psmap_mmap_fault(struct vm_area_st
+ return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
+ }
+
+-static struct vm_operations_struct spufs_psmap_mmap_vmops = {
++static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
+ .fault = spufs_psmap_mmap_fault,
+ };
+
+@@ -1517,7 +1517,7 @@ spufs_mfc_mmap_fault(struct vm_area_stru
+ return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
+ }
+
+-static struct vm_operations_struct spufs_mfc_mmap_vmops = {
++static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
+ .fault = spufs_mfc_mmap_fault,
+ };
+
+diff -urNp linux-2.6.30.8/arch/powerpc/platforms/pseries/dtl.c linux-2.6.30.8/arch/powerpc/platforms/pseries/dtl.c
+--- linux-2.6.30.8/arch/powerpc/platforms/pseries/dtl.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/platforms/pseries/dtl.c 2009-07-30 12:06:52.084821916 -0400
+@@ -209,7 +209,7 @@ static ssize_t dtl_file_read(struct file
+ return n_read * sizeof(struct dtl_entry);
+ }
+
+-static struct file_operations dtl_fops = {
++static const struct file_operations dtl_fops = {
+ .open = dtl_file_open,
+ .release = dtl_file_release,
+ .read = dtl_file_read,
+diff -urNp linux-2.6.30.8/arch/powerpc/platforms/pseries/hvCall_inst.c linux-2.6.30.8/arch/powerpc/platforms/pseries/hvCall_inst.c
+--- linux-2.6.30.8/arch/powerpc/platforms/pseries/hvCall_inst.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/powerpc/platforms/pseries/hvCall_inst.c 2009-07-30 09:48:09.906622773 -0400
+@@ -71,7 +71,7 @@ static int hc_show(struct seq_file *m, v
+ return 0;
+ }
+
+-static struct seq_operations hcall_inst_seq_ops = {
++static const struct seq_operations hcall_inst_seq_ops = {
+ .start = hc_start,
+ .next = hc_next,
+ .stop = hc_stop,
+diff -urNp linux-2.6.30.8/arch/s390/hypfs/inode.c linux-2.6.30.8/arch/s390/hypfs/inode.c
+--- linux-2.6.30.8/arch/s390/hypfs/inode.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/s390/hypfs/inode.c 2009-07-30 09:48:09.907613298 -0400
+@@ -41,7 +41,7 @@ struct hypfs_sb_info {
+
+ static const struct file_operations hypfs_file_ops;
+ static struct file_system_type hypfs_type;
+-static struct super_operations hypfs_s_ops;
++static const struct super_operations hypfs_s_ops;
+
+ /* start of list of all dentries, which have to be deleted on update */
+ static struct dentry *hypfs_last_dentry;
+@@ -476,7 +476,7 @@ static struct file_system_type hypfs_typ
+ .kill_sb = hypfs_kill_super
+ };
+
+-static struct super_operations hypfs_s_ops = {
++static const struct super_operations hypfs_s_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = hypfs_drop_inode,
+ .show_options = hypfs_show_options,
+diff -urNp linux-2.6.30.8/arch/s390/include/asm/atomic.h linux-2.6.30.8/arch/s390/include/asm/atomic.h
+--- linux-2.6.30.8/arch/s390/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/s390/include/asm/atomic.h 2009-07-30 09:48:09.908552392 -0400
+@@ -82,8 +82,10 @@ static __inline__ int atomic_add_return(
+ return __CS_LOOP(v, i, "ar");
+ }
+ #define atomic_add(_i, _v) atomic_add_return(_i, _v)
++#define atomic_add_unchecked(_i, _v) atomic_add((_i), (_v))
+ #define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
+ #define atomic_inc(_v) atomic_add_return(1, _v)
++#define atomic_inc_unchecked(_v) atomic_inc(_v)
+ #define atomic_inc_return(_v) atomic_add_return(1, _v)
+ #define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
+
+@@ -92,6 +94,7 @@ static __inline__ int atomic_sub_return(
+ return __CS_LOOP(v, i, "sr");
+ }
+ #define atomic_sub(_i, _v) atomic_sub_return(_i, _v)
++#define atomic_sub_unchecked(_i, _v) atomic_sub((_i), (_v))
+ #define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
+ #define atomic_dec(_v) atomic_sub_return(1, _v)
+ #define atomic_dec_return(_v) atomic_sub_return(1, _v)
+diff -urNp linux-2.6.30.8/arch/s390/include/asm/kmap_types.h linux-2.6.30.8/arch/s390/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/s390/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/s390/include/asm/kmap_types.h 2009-07-30 09:48:09.908552392 -0400
+@@ -16,6 +16,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.30.8/arch/s390/include/asm/uaccess.h linux-2.6.30.8/arch/s390/include/asm/uaccess.h
+--- linux-2.6.30.8/arch/s390/include/asm/uaccess.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/s390/include/asm/uaccess.h 2009-07-30 09:48:09.908552392 -0400
+@@ -232,6 +232,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_sleep();
++
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
+ static inline unsigned long __must_check
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n) && (n <= 256))
+ return uaccess.copy_from_user_small(n, from, to);
+ else
+@@ -283,6 +290,10 @@ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ might_sleep();
++
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else
+diff -urNp linux-2.6.30.8/arch/s390/kernel/module.c linux-2.6.30.8/arch/s390/kernel/module.c
+--- linux-2.6.30.8/arch/s390/kernel/module.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/s390/kernel/module.c 2009-07-30 09:48:09.908552392 -0400
+@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
+
+ /* Increase core size by size of got & plt and set start
+ offsets for got and plt. */
+- me->core_size = ALIGN(me->core_size, 4);
+- me->arch.got_offset = me->core_size;
+- me->core_size += me->arch.got_size;
+- me->arch.plt_offset = me->core_size;
+- me->core_size += me->arch.plt_size;
++ me->core_size_rw = ALIGN(me->core_size_rw, 4);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += me->arch.got_size;
++ me->arch.plt_offset = me->core_size_rx;
++ me->core_size_rx += me->arch.plt_size;
+ return 0;
+ }
+
+@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ if (info->got_initialized == 0) {
+ Elf_Addr *gotent;
+
+- gotent = me->module_core + me->arch.got_offset +
++ gotent = me->module_core_rw + me->arch.got_offset +
+ info->got_offset;
+ *gotent = val;
+ info->got_initialized = 1;
+@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ else if (r_type == R_390_GOTENT ||
+ r_type == R_390_GOTPLTENT)
+ *(unsigned int *) loc =
+- (val + (Elf_Addr) me->module_core - loc) >> 1;
++ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
+ else if (r_type == R_390_GOT64 ||
+ r_type == R_390_GOTPLT64)
+ *(unsigned long *) loc = val;
+@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
+ if (info->plt_initialized == 0) {
+ unsigned int *ip;
+- ip = me->module_core + me->arch.plt_offset +
++ ip = me->module_core_rx + me->arch.plt_offset +
+ info->plt_offset;
+ #ifndef CONFIG_64BIT
+ ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
+@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ val - loc + 0xffffUL < 0x1ffffeUL) ||
+ (r_type == R_390_PLT32DBL &&
+ val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+- val = (Elf_Addr) me->module_core +
++ val = (Elf_Addr) me->module_core_rx +
+ me->arch.plt_offset +
+ info->plt_offset;
+ val += rela->r_addend - loc;
+@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
+ case R_390_GOTOFF64: /* 64 bit offset to GOT. */
+ val = val + rela->r_addend -
+- ((Elf_Addr) me->module_core + me->arch.got_offset);
++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
+ if (r_type == R_390_GOTOFF16)
+ *(unsigned short *) loc = val;
+ else if (r_type == R_390_GOTOFF32)
+@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ break;
+ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
+ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
+- val = (Elf_Addr) me->module_core + me->arch.got_offset +
++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
+ rela->r_addend - loc;
+ if (r_type == R_390_GOTPC)
+ *(unsigned int *) loc = val;
+diff -urNp linux-2.6.30.8/arch/sh/include/asm/atomic.h linux-2.6.30.8/arch/sh/include/asm/atomic.h
+--- linux-2.6.30.8/arch/sh/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sh/include/asm/atomic.h 2009-07-30 09:48:09.909647067 -0400
+@@ -43,6 +43,9 @@
+ #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+ #define atomic_inc(v) atomic_add(1,(v))
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#define atomic_add_unchecked(i,v) atomic_add((i),(v))
++#define atomic_sub_unchecked(i,v) atomic_sub((i),(v))
+ #define atomic_dec(v) atomic_sub(1,(v))
+
+ #ifndef CONFIG_GUSA_RB
+diff -urNp linux-2.6.30.8/arch/sh/include/asm/kmap_types.h linux-2.6.30.8/arch/sh/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/sh/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sh/include/asm/kmap_types.h 2009-07-30 09:48:09.909647067 -0400
+@@ -24,7 +24,8 @@ D(9) KM_IRQ0,
+ D(10) KM_IRQ1,
+ D(11) KM_SOFTIRQ0,
+ D(12) KM_SOFTIRQ1,
+-D(13) KM_TYPE_NR
++D(13) KM_CLEARPAGE,
++D(14) KM_TYPE_NR
+ };
+
+ #undef D
+diff -urNp linux-2.6.30.8/arch/sparc/include/asm/atomic_32.h linux-2.6.30.8/arch/sparc/include/asm/atomic_32.h
+--- linux-2.6.30.8/arch/sparc/include/asm/atomic_32.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/include/asm/atomic_32.h 2009-07-30 09:48:09.910522181 -0400
+@@ -28,8 +28,11 @@ extern void atomic_set(atomic_t *, int);
+ #define atomic_read(v) ((v)->counter)
+
+ #define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
+ #define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
+ #define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
++#define atomic_inc_unchecked(v) atomic_inc(v)
+ #define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
+
+ #define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
+diff -urNp linux-2.6.30.8/arch/sparc/include/asm/atomic_64.h linux-2.6.30.8/arch/sparc/include/asm/atomic_64.h
+--- linux-2.6.30.8/arch/sparc/include/asm/atomic_64.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/include/asm/atomic_64.h 2009-07-30 09:48:09.910522181 -0400
+@@ -20,8 +20,10 @@
+ #define atomic64_set(v, i) (((v)->counter) = i)
+
+ extern void atomic_add(int, atomic_t *);
++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
+ extern void atomic64_add(int, atomic64_t *);
+ extern void atomic_sub(int, atomic_t *);
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
+ extern void atomic64_sub(int, atomic64_t *);
+
+ extern int atomic_add_ret(int, atomic_t *);
+@@ -59,6 +61,7 @@ extern int atomic64_sub_ret(int, atomic6
+ #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++#define atomic_inc_unchecked(v) atomic_inc(v)
+ #define atomic64_inc(v) atomic64_add(1, v)
+
+ #define atomic_dec(v) atomic_sub(1, v)
+diff -urNp linux-2.6.30.8/arch/sparc/include/asm/elf_32.h linux-2.6.30.8/arch/sparc/include/asm/elf_32.h
+--- linux-2.6.30.8/arch/sparc/include/asm/elf_32.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/include/asm/elf_32.h 2009-07-30 09:48:09.910522181 -0400
+@@ -116,6 +116,13 @@ typedef struct {
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff -urNp linux-2.6.30.8/arch/sparc/include/asm/elf_64.h linux-2.6.30.8/arch/sparc/include/asm/elf_64.h
+--- linux-2.6.30.8/arch/sparc/include/asm/elf_64.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/include/asm/elf_64.h 2009-07-30 09:48:09.910522181 -0400
+@@ -163,6 +163,12 @@ typedef struct {
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28 )
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29 )
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+diff -urNp linux-2.6.30.8/arch/sparc/include/asm/kmap_types.h linux-2.6.30.8/arch/sparc/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/sparc/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/include/asm/kmap_types.h 2009-07-30 09:48:09.910522181 -0400
+@@ -19,6 +19,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.30.8/arch/sparc/include/asm/pgtable_32.h linux-2.6.30.8/arch/sparc/include/asm/pgtable_32.h
+--- linux-2.6.30.8/arch/sparc/include/asm/pgtable_32.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/include/asm/pgtable_32.h 2009-07-30 09:48:09.910522181 -0400
+@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
+ BTFIXUPDEF_INT(page_none)
+ BTFIXUPDEF_INT(page_copy)
+ BTFIXUPDEF_INT(page_readonly)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++BTFIXUPDEF_INT(page_shared_noexec)
++BTFIXUPDEF_INT(page_copy_noexec)
++BTFIXUPDEF_INT(page_readonly_noexec)
++#endif
++
+ BTFIXUPDEF_INT(page_kernel)
+
+ #define PMD_SHIFT SUN4C_PMD_SHIFT
+@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
+ #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
+ #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++extern pgprot_t PAGE_SHARED_NOEXEC;
++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ extern unsigned long page_kernel;
+
+ #ifdef MODULE
+diff -urNp linux-2.6.30.8/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.30.8/arch/sparc/include/asm/pgtsrmmu.h
+--- linux-2.6.30.8/arch/sparc/include/asm/pgtsrmmu.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/include/asm/pgtsrmmu.h 2009-07-30 09:48:09.911921584 -0400
+@@ -115,6 +115,13 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#endif
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff -urNp linux-2.6.30.8/arch/sparc/include/asm/uaccess_32.h linux-2.6.30.8/arch/sparc/include/asm/uaccess_32.h
+--- linux-2.6.30.8/arch/sparc/include/asm/uaccess_32.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/include/asm/uaccess_32.h 2009-07-30 09:48:09.911921584 -0400
+@@ -246,6 +246,9 @@ extern unsigned long __copy_user(void __
+
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (n && __access_ok((unsigned long) to, n))
+ return __copy_user(to, (__force void __user *) from, n);
+ else
+@@ -259,6 +262,9 @@ static inline unsigned long __copy_to_us
+
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (n && __access_ok((unsigned long) from, n))
+ return __copy_user((__force void __user *) to, from, n);
+ else
+@@ -267,6 +273,9 @@ static inline unsigned long copy_from_us
+
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ return __copy_user((__force void __user *) to, from, n);
+ }
+
+diff -urNp linux-2.6.30.8/arch/sparc/include/asm/uaccess_64.h linux-2.6.30.8/arch/sparc/include/asm/uaccess_64.h
+--- linux-2.6.30.8/arch/sparc/include/asm/uaccess_64.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/include/asm/uaccess_64.h 2009-07-30 11:10:48.823569054 -0400
+@@ -212,7 +212,12 @@ extern unsigned long copy_from_user_fixu
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_from_user(to, from, size);
++ unsigned long ret;
++
++ if (unlikely(((long)size > INT_MAX) || ((long)size < 0)))
++ return size;
++
++ ret = ___copy_from_user(to, from, size);
+
+ if (unlikely(ret))
+ ret = copy_from_user_fixup(to, from, size);
+@@ -228,7 +233,12 @@ extern unsigned long copy_to_user_fixup(
+ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_to_user(to, from, size);
++ unsigned long ret;
++
++ if (unlikely(((long)size > INT_MAX) || ((long)size < 0)))
++ return size;
++
++ ret = ___copy_to_user(to, from, size);
+
+ if (unlikely(ret))
+ ret = copy_to_user_fixup(to, from, size);
+diff -urNp linux-2.6.30.8/arch/sparc/kernel/Makefile linux-2.6.30.8/arch/sparc/kernel/Makefile
+--- linux-2.6.30.8/arch/sparc/kernel/Makefile 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/kernel/Makefile 2009-07-30 09:48:09.911921584 -0400
+@@ -3,7 +3,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ extra-y := head_$(BITS).o
+ extra-y += init_task.o
+diff -urNp linux-2.6.30.8/arch/sparc/kernel/sys_sparc_32.c linux-2.6.30.8/arch/sparc/kernel/sys_sparc_32.c
+--- linux-2.6.30.8/arch/sparc/kernel/sys_sparc_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/kernel/sys_sparc_32.c 2009-07-30 09:48:09.912653453 -0400
+@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
+ if (ARCH_SUN4C && len > 0x20000000)
+ return -ENOMEM;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr);
+diff -urNp linux-2.6.30.8/arch/sparc/kernel/sys_sparc_64.c linux-2.6.30.8/arch/sparc/kernel/sys_sparc_64.c
+--- linux-2.6.30.8/arch/sparc/kernel/sys_sparc_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/kernel/sys_sparc_64.c 2009-08-26 16:18:36.187752551 -0400
+@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -153,9 +157,9 @@ unsigned long arch_get_unmapped_area(str
+ }
+
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ }
+
+@@ -175,8 +179,8 @@ full_search:
+ vma = find_vma(mm, VA_EXCLUDE_END);
+ }
+ if (unlikely(task_size < addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+@@ -216,7 +220,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -380,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
+ current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
+ sysctl_legacy_va_layout) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+@@ -394,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
+ gap = (task_size / 6 * 5);
+
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -urNp linux-2.6.30.8/arch/sparc/Makefile linux-2.6.30.8/arch/sparc/Makefile
+--- linux-2.6.30.8/arch/sparc/Makefile 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/Makefile 2009-07-30 11:10:48.852135371 -0400
+@@ -81,7 +81,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
+ # Export what is needed by arch/sparc/boot/Makefile
+ export VMLINUX_INIT VMLINUX_MAIN
+ VMLINUX_INIT := $(head-y) $(init-y)
+-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+ VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
+ VMLINUX_MAIN += $(drivers-y) $(net-y)
+
+diff -urNp linux-2.6.30.8/arch/sparc/mm/fault_32.c linux-2.6.30.8/arch/sparc/mm/fault_32.c
+--- linux-2.6.30.8/arch/sparc/mm/fault_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/mm/fault_32.c 2009-09-12 16:45:57.368158675 -0400
+@@ -21,6 +21,9 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/kdebug.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/system.h>
+ #include <asm/page.h>
+@@ -167,6 +170,265 @@ static unsigned long compute_si_addr(str
+ return safe_compute_effective_address(regs, insn);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->pc);
++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->pc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned int addr;
++
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(ba, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++
++ /* PaX: newer glibc/binutils generate sethi/jmp instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ addr = (save & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr;
++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->pc-4));
++ err |= get_user(call, (unsigned int *)regs->pc);
++ err |= get_user(nop, (unsigned int *)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+ unsigned long address)
+ {
+@@ -231,6 +493,24 @@ good_area:
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+diff -urNp linux-2.6.30.8/arch/sparc/mm/fault_64.c linux-2.6.30.8/arch/sparc/mm/fault_64.c
+--- linux-2.6.30.8/arch/sparc/mm/fault_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/mm/fault_64.c 2009-09-26 23:11:15.962139535 -0400
+@@ -20,6 +20,9 @@
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
+ #include <linux/percpu.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -249,6 +252,413 @@ static void noinline bogus_32bit_fault_a
+ show_regs(regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_EMUPLT
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->tpc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned long addr;
++
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int mov1, call, mov2;
++
++ err = get_user(mov1, (unsigned int *)regs->tpc);
++ err |= get_user(call, (unsigned int *)(regs->tpc+4));
++ err |= get_user(mov2, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if (mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(or1, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or2, (unsigned int *)(regs->tpc+12));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020 &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or, (unsigned int *)(regs->tpc+12));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+16));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+20));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020 &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #7 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (ba & 0xFFF00000U) == 0x30600000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++
++ /* PaX: newer glibc/binutils generate sethi/jmp instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ addr = (save & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr;
++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->tpc-4));
++ err |= get_user(call, (unsigned int *)regs->tpc);
++ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ dl_resolve &= 0xFFFFFFFFUL;
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -315,6 +725,29 @@ asmlinkage void __kprobes do_sparc64_fau
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
+diff -urNp linux-2.6.30.8/arch/sparc/mm/init_32.c linux-2.6.30.8/arch/sparc/mm/init_32.c
+--- linux-2.6.30.8/arch/sparc/mm/init_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/mm/init_32.c 2009-07-30 09:48:09.914627627 -0400
+@@ -316,6 +316,9 @@ extern void device_scan(void);
+ pgprot_t PAGE_SHARED __read_mostly;
+ EXPORT_SYMBOL(PAGE_SHARED);
+
++pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
++EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
++
+ void __init paging_init(void)
+ {
+ switch(sparc_cpu_model) {
+@@ -341,17 +344,17 @@ void __init paging_init(void)
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+- protection_map[1] = PAGE_READONLY;
+- protection_map[2] = PAGE_COPY;
+- protection_map[3] = PAGE_COPY;
++ protection_map[1] = PAGE_READONLY_NOEXEC;
++ protection_map[2] = PAGE_COPY_NOEXEC;
++ protection_map[3] = PAGE_COPY_NOEXEC;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+- protection_map[9] = PAGE_READONLY;
+- protection_map[10] = PAGE_SHARED;
+- protection_map[11] = PAGE_SHARED;
++ protection_map[9] = PAGE_READONLY_NOEXEC;
++ protection_map[10] = PAGE_SHARED_NOEXEC;
++ protection_map[11] = PAGE_SHARED_NOEXEC;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
+diff -urNp linux-2.6.30.8/arch/sparc/mm/Makefile linux-2.6.30.8/arch/sparc/mm/Makefile
+--- linux-2.6.30.8/arch/sparc/mm/Makefile 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/mm/Makefile 2009-07-30 09:48:09.912653453 -0400
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
+ obj-y += fault_$(BITS).o
+diff -urNp linux-2.6.30.8/arch/sparc/mm/srmmu.c linux-2.6.30.8/arch/sparc/mm/srmmu.c
+--- linux-2.6.30.8/arch/sparc/mm/srmmu.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/sparc/mm/srmmu.c 2009-07-30 09:48:09.914627627 -0400
+@@ -2148,6 +2148,13 @@ void __init ld_mmu_srmmu(void)
+ PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
++#endif
++
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
+ page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+
+diff -urNp linux-2.6.30.8/arch/um/include/asm/kmap_types.h linux-2.6.30.8/arch/um/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/um/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/um/include/asm/kmap_types.h 2009-07-30 09:48:09.914627627 -0400
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.30.8/arch/um/include/asm/page.h linux-2.6.30.8/arch/um/include/asm/page.h
+--- linux-2.6.30.8/arch/um/include/asm/page.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/um/include/asm/page.h 2009-07-30 09:48:09.915798567 -0400
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ struct page;
+diff -urNp linux-2.6.30.8/arch/um/sys-i386/syscalls.c linux-2.6.30.8/arch/um/sys-i386/syscalls.c
+--- linux-2.6.30.8/arch/um/sys-i386/syscalls.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/um/sys-i386/syscalls.c 2009-07-30 09:48:09.915798567 -0400
+@@ -11,6 +11,21 @@
+ #include "asm/uaccess.h"
+ #include "asm/unistd.h"
+
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
++{
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size || addr > pax_task_size - len)
++ return -EINVAL;
++
++ return 0;
++}
++
+ /*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls. Linux/i386 didn't use to be able to handle more than
+diff -urNp linux-2.6.30.8/arch/x86/boot/bitops.h linux-2.6.30.8/arch/x86/boot/bitops.h
+--- linux-2.6.30.8/arch/x86/boot/bitops.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/boot/bitops.h 2009-07-30 09:48:09.917626356 -0400
+@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
+ u8 v;
+ const u32 *p = (const u32 *)addr;
+
+- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ return v;
+ }
+
+@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
+
+ static inline void set_bit(int nr, void *addr)
+ {
+- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+
+ #endif /* BOOT_BITOPS_H */
+diff -urNp linux-2.6.30.8/arch/x86/boot/boot.h linux-2.6.30.8/arch/x86/boot/boot.h
+--- linux-2.6.30.8/arch/x86/boot/boot.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/boot/boot.h 2009-07-30 09:48:09.917626356 -0400
+@@ -80,7 +80,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+ u16 seg;
+- asm("movw %%ds,%0" : "=rm" (seg));
++ asm volatile("movw %%ds,%0" : "=rm" (seg));
+ return seg;
+ }
+
+@@ -176,7 +176,7 @@ static inline void wrgs32(u32 v, addr_t
+ static inline int memcmp(const void *s1, const void *s2, size_t len)
+ {
+ u8 diff;
+- asm("repe; cmpsb; setnz %0"
++ asm volatile("repe; cmpsb; setnz %0"
+ : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ return diff;
+ }
+diff -urNp linux-2.6.30.8/arch/x86/boot/compressed/head_32.S linux-2.6.30.8/arch/x86/boot/compressed/head_32.S
+--- linux-2.6.30.8/arch/x86/boot/compressed/head_32.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/boot/compressed/head_32.S 2009-07-30 09:48:09.917626356 -0400
+@@ -68,7 +68,7 @@ ENTRY(startup_32)
+ addl $(CONFIG_PHYSICAL_ALIGN - 1), %ebx
+ andl $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Replace the compressed data size with the uncompressed size */
+@@ -78,8 +78,8 @@ ENTRY(startup_32)
+ /* Add 8 bytes for every 32K input block */
+ shrl $12, %eax
+ addl %eax, %ebx
+- /* Add 32K + 18 bytes of extra slack */
+- addl $(32768 + 18), %ebx
++ /* Add 64K of extra slack */
++ addl $65536, %ebx
+ /* Align on a 4K boundary */
+ addl $4095, %ebx
+ andl $~4095, %ebx
+@@ -103,7 +103,7 @@ ENTRY(startup_32)
+ addl $(CONFIG_PHYSICAL_ALIGN - 1), %ebp
+ andl $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebp
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebp
++ movl $____LOAD_PHYSICAL_ADDR, %ebp
+ #endif
+
+ /*
+@@ -160,16 +160,15 @@ relocated:
+ * and where it was actually loaded.
+ */
+ movl %ebp, %ebx
+- subl $LOAD_PHYSICAL_ADDR, %ebx
++ subl $____LOAD_PHYSICAL_ADDR, %ebx
+ jz 2f /* Nothing to be done if loaded at compiled addr. */
+ /*
+ * Process relocations.
+ */
+
+ 1: subl $4, %edi
+- movl 0(%edi), %ecx
+- testl %ecx, %ecx
+- jz 2f
++ movl (%edi), %ecx
++ jecxz 2f
+ addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+ jmp 1b
+ 2:
+diff -urNp linux-2.6.30.8/arch/x86/boot/compressed/misc.c linux-2.6.30.8/arch/x86/boot/compressed/misc.c
+--- linux-2.6.30.8/arch/x86/boot/compressed/misc.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/boot/compressed/misc.c 2009-07-30 09:48:09.917626356 -0400
+@@ -288,7 +288,7 @@ static void parse_elf(void *output)
+ case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+ dest = output;
+- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+ dest = (void *)(phdr->p_paddr);
+ #endif
+@@ -336,7 +336,7 @@ asmlinkage void decompress_kernel(void *
+ if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff))
+ error("Destination address too large");
+ #ifndef CONFIG_RELOCATABLE
+- if ((u32)output != LOAD_PHYSICAL_ADDR)
++ if ((u32)output != ____LOAD_PHYSICAL_ADDR)
+ error("Wrong destination address");
+ #endif
+ #endif
+diff -urNp linux-2.6.30.8/arch/x86/boot/compressed/relocs.c linux-2.6.30.8/arch/x86/boot/compressed/relocs.c
+--- linux-2.6.30.8/arch/x86/boot/compressed/relocs.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/boot/compressed/relocs.c 2009-08-24 21:08:46.297666606 -0400
+@@ -10,8 +10,11 @@
+ #define USE_BSD
+ #include <endian.h>
+
++#include "../../../../include/linux/autoconf.h"
++
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ static Elf32_Ehdr ehdr;
++static Elf32_Phdr *phdr;
+ static unsigned long reloc_count, reloc_idx;
+ static unsigned long *relocs;
+
+@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
+
+ static int is_safe_abs_reloc(const char* sym_name)
+ {
+- int i;
++ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
+ if (!strcmp(sym_name, safe_abs_relocs[i]))
+@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
+ }
+ }
+
++static void read_phdrs(FILE *fp)
++{
++ unsigned int i;
++
++ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
++ if (!phdr) {
++ die("Unable to allocate %d program headers\n",
++ ehdr.e_phnum);
++ }
++ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
++ die("Seek to %d failed: %s\n",
++ ehdr.e_phoff, strerror(errno));
++ }
++ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
++ die("Cannot read ELF program headers: %s\n",
++ strerror(errno));
++ }
++ for(i = 0; i < ehdr.e_phnum; i++) {
++ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
++ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
++ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
++ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
++ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
++ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
++ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
++ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
++ }
++
++}
++
+ static void read_shdrs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ Elf32_Shdr shdr;
+
+ secs = calloc(ehdr.e_shnum, sizeof(struct section));
+@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
+
+ static void read_strtabs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_STRTAB) {
+@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
+
+ static void read_symtabs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
+
+ static void read_relocs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
++ uint32_t base;
++
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
+ die("Cannot read symbol table: %s\n",
+ strerror(errno));
+ }
++ base = 0;
++ for (j = 0; j < ehdr.e_phnum; j++) {
++ if (phdr[j].p_type != PT_LOAD )
++ continue;
++ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
++ continue;
++ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
++ break;
++ }
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
+ Elf32_Rel *rel = &sec->reltab[j];
+- rel->r_offset = elf32_to_cpu(rel->r_offset);
++ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
+ rel->r_info = elf32_to_cpu(rel->r_info);
+ }
+ }
+@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
+
+ static void print_absolute_symbols(void)
+ {
+- int i;
++ unsigned int i;
+ printf("Absolute symbols\n");
+ printf(" Num: Value Size Type Bind Visibility Name\n");
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+ continue;
+@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
+
+ static void print_absolute_relocs(void)
+ {
+- int i, printed = 0;
++ unsigned int i, printed = 0;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ struct section *sec_applies, *sec_symtab;
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+ if (sec->shdr.sh_type != SHT_REL) {
+ continue;
+ }
+@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
+
+ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+ {
+- int i;
++ unsigned int i;
+ /* Walk through the relocations */
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+ struct section *sec_applies, *sec_symtab;
+- int j;
++ unsigned int j;
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -504,6 +548,23 @@ static void walk_relocs(void (*visit)(El
+ if (sym->st_shndx == SHN_ABS) {
+ continue;
+ }
++ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
++ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
++ continue;
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
++ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
++ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".text.head")) {
++ if (strcmp(sym_name(sym_strtab, sym), "__init_end") &&
++ strcmp(sym_name(sym_strtab, sym), "KERNEL_TEXT_OFFSET"))
++ continue;
++ }
++ if (!strcmp(sec_name(sym->st_shndx), ".text"))
++ continue;
++#endif
+ if (r_type == R_386_NONE || r_type == R_386_PC32) {
+ /*
+ * NONE can be ignored and and PC relative
+@@ -541,7 +602,7 @@ static int cmp_relocs(const void *va, co
+
+ static void emit_relocs(int as_text)
+ {
+- int i;
++ unsigned int i;
+ /* Count how many relocations I have and allocate space for them. */
+ reloc_count = 0;
+ walk_relocs(count_reloc);
+@@ -634,6 +695,7 @@ int main(int argc, char **argv)
+ fname, strerror(errno));
+ }
+ read_ehdr(fp);
++ read_phdrs(fp);
+ read_shdrs(fp);
+ read_strtabs(fp);
+ read_symtabs(fp);
+diff -urNp linux-2.6.30.8/arch/x86/boot/cpucheck.c linux-2.6.30.8/arch/x86/boot/cpucheck.c
+--- linux-2.6.30.8/arch/x86/boot/cpucheck.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/boot/cpucheck.c 2009-07-30 09:48:09.918715361 -0400
+@@ -74,7 +74,7 @@ static int has_fpu(void)
+ u16 fcw = -1, fsw = -1;
+ u32 cr0;
+
+- asm("movl %%cr0,%0" : "=r" (cr0));
++ asm volatile("movl %%cr0,%0" : "=r" (cr0));
+ if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
+ cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
+ asm volatile("movl %0,%%cr0" : : "r" (cr0));
+@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
+ {
+ u32 f0, f1;
+
+- asm("pushfl ; "
++ asm volatile("pushfl ; "
+ "pushfl ; "
+ "popl %0 ; "
+ "movl %0,%1 ; "
+@@ -115,7 +115,7 @@ static void get_flags(void)
+ set_bit(X86_FEATURE_FPU, cpu.flags);
+
+ if (has_eflag(X86_EFLAGS_ID)) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_intel_level),
+ "=b" (cpu_vendor[0]),
+ "=d" (cpu_vendor[1]),
+@@ -124,7 +124,7 @@ static void get_flags(void)
+
+ if (max_intel_level >= 0x00000001 &&
+ max_intel_level <= 0x0000ffff) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (tfms),
+ "=c" (cpu.flags[4]),
+ "=d" (cpu.flags[0])
+@@ -136,7 +136,7 @@ static void get_flags(void)
+ cpu.model += ((tfms >> 16) & 0xf) << 4;
+ }
+
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_amd_level)
+ : "a" (0x80000000)
+ : "ebx", "ecx", "edx");
+@@ -144,7 +144,7 @@ static void get_flags(void)
+ if (max_amd_level >= 0x80000001 &&
+ max_amd_level <= 0x8000ffff) {
+ u32 eax = 0x80000001;
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "+a" (eax),
+ "=c" (cpu.flags[6]),
+ "=d" (cpu.flags[1])
+@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 ecx = MSR_K7_HWCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax &= ~(1 << 15);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ get_flags(); /* Make sure it really did something */
+ err = check_flags();
+@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 ecx = MSR_VIA_FCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax |= (1<<1)|(1<<7);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ set_bit(X86_FEATURE_CX8, cpu.flags);
+ err = check_flags();
+@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 eax, edx;
+ u32 level = 1;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+- asm("cpuid"
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
++ asm volatile("cpuid"
+ : "+a" (level), "=d" (cpu.flags[0])
+ : : "ecx", "ebx");
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ err = check_flags();
+ }
+diff -urNp linux-2.6.30.8/arch/x86/boot/edd.c linux-2.6.30.8/arch/x86/boot/edd.c
+--- linux-2.6.30.8/arch/x86/boot/edd.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/boot/edd.c 2009-07-30 09:48:09.919627263 -0400
+@@ -81,7 +81,7 @@ static int get_edd_info(u8 devno, struct
+ ax = 0x4100;
+ bx = EDDMAGIC1;
+ dx = devno;
+- asm("pushfl; stc; int $0x13; setc %%al; popfl"
++ asm volatile("pushfl; stc; int $0x13; setc %%al; popfl"
+ : "+a" (ax), "+b" (bx), "=c" (cx), "+d" (dx)
+ : : "esi", "edi");
+
+@@ -100,7 +100,7 @@ static int get_edd_info(u8 devno, struct
+ ei->params.length = sizeof(ei->params);
+ ax = 0x4800;
+ dx = devno;
+- asm("pushfl; int $0x13; popfl"
++ asm volatile("pushfl; int $0x13; popfl"
+ : "+a" (ax), "+d" (dx), "=m" (ei->params)
+ : "S" (&ei->params)
+ : "ebx", "ecx", "edi");
+@@ -111,7 +111,7 @@ static int get_edd_info(u8 devno, struct
+ ax = 0x0800;
+ dx = devno;
+ di = 0;
+- asm("pushw %%es; "
++ asm volatile("pushw %%es; "
+ "movw %%di,%%es; "
+ "pushfl; stc; int $0x13; setc %%al; popfl; "
+ "popw %%es"
+diff -urNp linux-2.6.30.8/arch/x86/boot/main.c linux-2.6.30.8/arch/x86/boot/main.c
+--- linux-2.6.30.8/arch/x86/boot/main.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/boot/main.c 2009-07-30 09:48:09.919627263 -0400
+@@ -78,7 +78,7 @@ static void query_ist(void)
+ if (cpu.level < 6)
+ return;
+
+- asm("int $0x15"
++ asm volatile("int $0x15"
+ : "=a" (boot_params.ist_info.signature),
+ "=b" (boot_params.ist_info.command),
+ "=c" (boot_params.ist_info.event),
+diff -urNp linux-2.6.30.8/arch/x86/boot/mca.c linux-2.6.30.8/arch/x86/boot/mca.c
+--- linux-2.6.30.8/arch/x86/boot/mca.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/boot/mca.c 2009-07-30 09:48:09.919627263 -0400
+@@ -19,7 +19,7 @@ int query_mca(void)
+ u8 err;
+ u16 es, bx, len;
+
+- asm("pushw %%es ; "
++ asm volatile("pushw %%es ; "
+ "int $0x15 ; "
+ "setc %0 ; "
+ "movw %%es, %1 ; "
+diff -urNp linux-2.6.30.8/arch/x86/boot/memory.c linux-2.6.30.8/arch/x86/boot/memory.c
+--- linux-2.6.30.8/arch/x86/boot/memory.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/boot/memory.c 2009-07-30 09:48:09.919627263 -0400
+@@ -47,7 +47,7 @@ static int detect_memory_e820(void)
+ so they must be either used for the error output
+ or explicitly marked clobbered. Given that, assume there
+ is something out there clobbering %ebp and %edi, too. */
+- asm("pushl %%ebp; int $0x15; popl %%ebp; setc %0"
++ asm volatile("pushl %%ebp; int $0x15; popl %%ebp; setc %0"
+ : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
+ "=D" (edi), "+m" (buf)
+ : "D" (&buf), "d" (SMAP), "a" (0xe820)
+@@ -83,7 +83,7 @@ static int detect_memory_e801(void)
+
+ bx = cx = dx = 0;
+ ax = 0xe801;
+- asm("stc; int $0x15; setc %0"
++ asm volatile("stc; int $0x15; setc %0"
+ : "=m" (err), "+a" (ax), "+b" (bx), "+c" (cx), "+d" (dx));
+
+ if (err)
+@@ -113,7 +113,7 @@ static int detect_memory_88(void)
+ u8 err;
+
+ ax = 0x8800;
+- asm("stc; int $0x15; setc %0" : "=bcdm" (err), "+a" (ax));
++ asm volatile("stc; int $0x15; setc %0" : "=bcdm" (err), "+a" (ax));
+
+ boot_params.screen_info.ext_mem_k = ax;
+
+diff -urNp linux-2.6.30.8/arch/x86/boot/video.c linux-2.6.30.8/arch/x86/boot/video.c
+--- linux-2.6.30.8/arch/x86/boot/video.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/boot/video.c 2009-07-30 09:48:09.920627513 -0400
+@@ -23,7 +23,7 @@ static void store_cursor_position(void)
+
+ ax = 0x0300;
+ bx = 0;
+- asm(INT10
++ asm volatile(INT10
+ : "=d" (curpos), "+a" (ax), "+b" (bx)
+ : : "ecx", "esi", "edi");
+
+@@ -38,7 +38,7 @@ static void store_video_mode(void)
+ /* N.B.: the saving of the video page here is a bit silly,
+ since we pretty much assume page 0 everywhere. */
+ ax = 0x0f00;
+- asm(INT10
++ asm volatile(INT10
+ : "+a" (ax), "=b" (page)
+ : : "ecx", "edx", "esi", "edi");
+
+diff -urNp linux-2.6.30.8/arch/x86/boot/video-vesa.c linux-2.6.30.8/arch/x86/boot/video-vesa.c
+--- linux-2.6.30.8/arch/x86/boot/video-vesa.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/boot/video-vesa.c 2009-07-30 09:48:09.920627513 -0400
+@@ -41,7 +41,7 @@ static int vesa_probe(void)
+
+ ax = 0x4f00;
+ di = (size_t)&vginfo;
+- asm(INT10
++ asm volatile(INT10
+ : "+a" (ax), "+D" (di), "=m" (vginfo)
+ : : "ebx", "ecx", "edx", "esi");
+
+@@ -68,7 +68,7 @@ static int vesa_probe(void)
+ ax = 0x4f01;
+ cx = mode;
+ di = (size_t)&vminfo;
+- asm(INT10
++ asm volatile(INT10
+ : "+a" (ax), "+c" (cx), "+D" (di), "=m" (vminfo)
+ : : "ebx", "edx", "esi");
+
+@@ -120,7 +120,7 @@ static int vesa_set_mode(struct mode_inf
+ ax = 0x4f01;
+ cx = vesa_mode;
+ di = (size_t)&vminfo;
+- asm(INT10
++ asm volatile(INT10
+ : "+a" (ax), "+c" (cx), "+D" (di), "=m" (vminfo)
+ : : "ebx", "edx", "esi");
+
+@@ -202,19 +202,20 @@ static void vesa_dac_set_8bits(void)
+ /* Save the VESA protected mode info */
+ static void vesa_store_pm_info(void)
+ {
+- u16 ax, bx, di, es;
++ u16 ax, bx, cx, di, es;
+
+ ax = 0x4f0a;
+- bx = di = 0;
+- asm("pushw %%es; "INT10"; movw %%es,%0; popw %%es"
+- : "=d" (es), "+a" (ax), "+b" (bx), "+D" (di)
+- : : "ecx", "esi");
++ bx = cx = di = 0;
++ asm volatile("pushw %%es; "INT10"; movw %%es,%0; popw %%es"
++ : "=d" (es), "+a" (ax), "+b" (bx), "+c" (cx), "+D" (di)
++ : : "esi");
+
+ if (ax != 0x004f)
+ return;
+
+ boot_params.screen_info.vesapm_seg = es;
+ boot_params.screen_info.vesapm_off = di;
++ boot_params.screen_info.vesapm_size = cx;
+ }
+
+ /*
+@@ -268,7 +269,7 @@ void vesa_store_edid(void)
+ /* Note: The VBE DDC spec is different from the main VESA spec;
+ we genuinely have to assume all registers are destroyed here. */
+
+- asm("pushw %%es; movw %2,%%es; "INT10"; popw %%es"
++ asm volatile("pushw %%es; movw %2,%%es; "INT10"; popw %%es"
+ : "+a" (ax), "+b" (bx), "+c" (cx), "+D" (di)
+ : : "esi", "edx");
+
+@@ -283,7 +284,7 @@ void vesa_store_edid(void)
+ cx = 0; /* Controller 0 */
+ dx = 0; /* EDID block number */
+ di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */
+- asm(INT10
++ asm volatile(INT10
+ : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info),
+ "+c" (cx), "+D" (di)
+ : : "esi");
+diff -urNp linux-2.6.30.8/arch/x86/boot/video-vga.c linux-2.6.30.8/arch/x86/boot/video-vga.c
+--- linux-2.6.30.8/arch/x86/boot/video-vga.c 2009-07-30 20:32:40.362766121 -0400
++++ linux-2.6.30.8/arch/x86/boot/video-vga.c 2009-07-30 20:35:05.409914191 -0400
+@@ -260,7 +260,7 @@ static int vga_probe(void)
+ u8 vga_flag;
+
+ ax = 0x1200;
+- asm(INT10
++ asm volatile(INT10
+ : "+a" (ax), "=b" (ega_bx)
+ : "b" (0x10) /* Check EGA/VGA */
+ : "ecx", "edx", "esi", "edi");
+@@ -272,7 +272,7 @@ static int vga_probe(void)
+ /* If we have MDA/CGA/HGC then BL will be unchanged at 0x10 */
+ if ((u8)ega_bx != 0x10) {
+ /* EGA/VGA */
+- asm(INT10
++ asm volatile(INT10
+ : "=a" (vga_flag)
+ : "a" (0x1a00)
+ : "ebx", "ecx", "edx", "esi", "edi");
+diff -urNp linux-2.6.30.8/arch/x86/ia32/ia32_signal.c linux-2.6.30.8/arch/x86/ia32/ia32_signal.c
+--- linux-2.6.30.8/arch/x86/ia32/ia32_signal.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/ia32/ia32_signal.c 2009-07-30 09:48:09.921498916 -0400
+@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
+ sp -= frame_size;
+ /* Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ return (void __user *) sp;
+ }
+
+@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
+ 0xb8,
+ __NR_ia32_rt_sigreturn,
+ 0x80cd,
+- 0,
++ 0
+ };
+
+ frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/alternative.h linux-2.6.30.8/arch/x86/include/asm/alternative.h
+--- linux-2.6.30.8/arch/x86/include/asm/alternative.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/alternative.h 2009-07-30 09:48:09.921498916 -0400
+@@ -96,7 +96,7 @@ const unsigned char *const *find_nop_tab
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+- ".section .altinstr_replacement,\"ax\"\n" \
++ ".section .altinstr_replacement,\"a\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" :: "i" (feature) : "memory")
+
+@@ -120,7 +120,7 @@ const unsigned char *const *find_nop_tab
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+- ".section .altinstr_replacement,\"ax\"\n" \
++ ".section .altinstr_replacement,\"a\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" :: "i" (feature), ##input)
+
+@@ -135,7 +135,7 @@ const unsigned char *const *find_nop_tab
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+- ".section .altinstr_replacement,\"ax\"\n" \
++ ".section .altinstr_replacement,\"a\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" : output : [feat] "i" (feature), ##input)
+
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/apm.h linux-2.6.30.8/arch/x86/include/asm/apm.h
+--- linux-2.6.30.8/arch/x86/include/asm/apm.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/apm.h 2009-07-30 09:48:09.921498916 -0400
+@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/atomic_32.h linux-2.6.30.8/arch/x86/include/asm/atomic_32.h
+--- linux-2.6.30.8/arch/x86/include/asm/atomic_32.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/atomic_32.h 2009-07-30 09:48:09.921498916 -0400
+@@ -39,7 +39,29 @@
+ */
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -53,7 +75,29 @@ static inline void atomic_add(int i, ato
+ */
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %1,%0\n"
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_sub_unchecked - subtract integer from atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -71,7 +115,16 @@ static inline int atomic_sub_and_test(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %2,%0\n"
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -85,7 +138,30 @@ static inline int atomic_sub_and_test(in
+ */
+ static inline void atomic_inc(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "into\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "decl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -97,7 +173,18 @@ static inline void atomic_inc(atomic_t *
+ */
+ static inline void atomic_dec(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "into\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (v->counter));
+ }
+
+@@ -113,7 +200,19 @@ static inline int atomic_dec_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decl %0; sete %1"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "into\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -131,7 +230,19 @@ static inline int atomic_inc_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incl %0; sete %1"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "into\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "decl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -150,7 +261,16 @@ static inline int atomic_add_negative(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %2,%0\n"
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -173,7 +293,15 @@ static inline int atomic_add_return(int
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
+- asm volatile(LOCK_PREFIX "xaddl %0, %1"
++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movl %0, %1\n"
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+@@ -214,17 +342,28 @@ static inline int atomic_sub_return(int
+ */
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/atomic_64.h linux-2.6.30.8/arch/x86/include/asm/atomic_64.h
+--- linux-2.6.30.8/arch/x86/include/asm/atomic_64.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/atomic_64.h 2009-07-30 09:48:09.922664908 -0400
+@@ -38,7 +38,29 @@
+ */
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "ir" (i), "m" (v->counter));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
+ }
+@@ -52,7 +74,29 @@ static inline void atomic_add(int i, ato
+ */
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "ir" (i), "m" (v->counter));
++}
++
++/**
++ * atomic_sub_unchecked - subtract the atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
+ }
+@@ -70,7 +114,16 @@ static inline int atomic_sub_and_test(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -84,7 +137,32 @@ static inline int atomic_sub_and_test(in
+ */
+ static inline void atomic_inc(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "decl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -97,7 +175,19 @@ static inline void atomic_inc(atomic_t *
+ */
+ static inline void atomic_dec(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -114,7 +204,20 @@ static inline int atomic_dec_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decl %0; sete %1"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -132,7 +235,20 @@ static inline int atomic_inc_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incl %0; sete %1"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "decl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -151,7 +267,16 @@ static inline int atomic_add_negative(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -167,7 +292,15 @@ static inline int atomic_add_negative(in
+ static inline int atomic_add_return(int i, atomic_t *v)
+ {
+ int __i = i;
+- asm volatile(LOCK_PREFIX "xaddl %0, %1"
++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movl %0, %1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+@@ -212,7 +345,15 @@ static inline int atomic_sub_return(int
+ */
+ static inline void atomic64_add(long i, atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addq %1,%0"
++ asm volatile(LOCK_PREFIX "addq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+ }
+@@ -226,7 +367,15 @@ static inline void atomic64_add(long i,
+ */
+ static inline void atomic64_sub(long i, atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subq %1,%0"
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+ }
+@@ -244,7 +393,16 @@ static inline int atomic64_sub_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -258,7 +416,19 @@ static inline int atomic64_sub_and_test(
+ */
+ static inline void atomic64_inc(atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incq %0"
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "decq %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -271,7 +441,19 @@ static inline void atomic64_inc(atomic64
+ */
+ static inline void atomic64_dec(atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decq %0"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incq %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -288,7 +470,20 @@ static inline int atomic64_dec_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decq %0; sete %1"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incq %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -306,7 +501,20 @@ static inline int atomic64_inc_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incq %0; sete %1"
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "decq %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -325,7 +533,16 @@ static inline int atomic64_add_negative(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -341,7 +558,15 @@ static inline int atomic64_add_negative(
+ static inline long atomic64_add_return(long i, atomic64_t *v)
+ {
+ long __i = i;
+- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
++ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movq %0, %1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+@@ -372,17 +597,29 @@ static inline long atomic64_sub_return(l
+ */
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+@@ -398,17 +635,29 @@ static inline int atomic_add_unless(atom
+ */
+ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "er" (a));
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ /**
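
The PAX_REFCOUNT hunks above all follow one pattern: do the arithmetic,
test the overflow flag ("jno"/"into"), undo the operation (directly or
via a .fixup stanza), and raise #OF through "into"/"int $4" with an
_ASM_EXTABLE entry so the exception handler can report the overflow.
A minimal user-space sketch of the same add/check/undo idea, with
made-up names and a boolean return instead of the trap:

#include <stdio.h>
#include <limits.h>
#include <stdbool.h>

typedef struct { int counter; } demo_atomic_t;

/* Add i to v; on signed overflow, roll the addition back and report
 * failure instead of raising #OF the way the kernel patch does. */
static bool demo_atomic_add_checked(int i, demo_atomic_t *v)
{
        unsigned char overflowed;

        asm volatile("lock addl %2,%0\n\t"
                     "seto %1\n\t"          /* record OF before the undo */
                     "jno 0f\n\t"
                     "lock subl %2,%0\n"    /* undo, as the patch does */
                     "0:"
                     : "+m" (v->counter), "=qm" (overflowed)
                     : "ir" (i)
                     : "memory", "cc");
        return !overflowed;
}

int main(void)
{
        demo_atomic_t v = { INT_MAX };

        printf("ok=%d counter=%d\n", demo_atomic_add_checked(1, &v), v.counter);
        return 0;
}
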
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/boot.h linux-2.6.30.8/arch/x86/include/asm/boot.h
+--- linux-2.6.30.8/arch/x86/include/asm/boot.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/boot.h 2009-07-30 09:48:09.922664908 -0400
+@@ -9,10 +9,15 @@
+ #ifdef __KERNEL__
+
+ /* Physical address where kernel should be loaded. */
+-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ + (CONFIG_PHYSICAL_ALIGN - 1)) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
++#ifndef __ASSEMBLY__
++extern unsigned char __LOAD_PHYSICAL_ADDR[];
++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
++#endif
++
+ #ifdef CONFIG_KERNEL_BZIP2
+ #define BOOT_HEAP_SIZE 0x400000
+ #else /* !CONFIG_KERNEL_BZIP2 */
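
The boot.h change above replaces a compile-time constant with the
address of a linker-provided symbol, so the load address becomes a
link-time decision instead of being baked into every user of the
macro. The idiom in isolation (symbol name and value are demo-only;
build with gcc -no-pie so the absolute symbol survives linking):

#include <stdio.h>

/* Normally a linker script would define this; for a self-contained
 * demo we fake it with an absolute assembler symbol. */
asm(".globl __demo_load_addr\n\t.set __demo_load_addr, 0x1000000");

extern unsigned char __demo_load_addr[];
#define DEMO_LOAD_ADDR ((unsigned long)__demo_load_addr)

int main(void)
{
        printf("load address: %#lx\n", DEMO_LOAD_ADDR);
        return 0;
}
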
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/cache.h linux-2.6.30.8/arch/x86/include/asm/cache.h
+--- linux-2.6.30.8/arch/x86/include/asm/cache.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/cache.h 2009-07-30 09:48:09.923412137 -0400
+@@ -6,6 +6,7 @@
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data.read_mostly")))
++#define __read_only __attribute__((__section__(".data.read_only")))
+
+ #ifdef CONFIG_X86_VSMP
+ /* vSMP Internode cacheline shift */
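
__read_only above only places data in a named section; it is the
kernel's linker script and the KERNEXEC page-protection code that
actually make that section read-only. The attribute mechanics in a
stand-alone sketch (section name chosen for the demo):

#include <stdio.h>

#define __demo_read_only __attribute__((__section__(".data.demo_ro")))

static int demo_setting __demo_read_only = 42;

int main(void)
{
        /* "readelf -S a.out | grep demo_ro" shows the placement. */
        printf("demo_setting = %d\n", demo_setting);
        return 0;
}
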
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/checksum_32.h linux-2.6.30.8/arch/x86/include/asm/checksum_32.h
+--- linux-2.6.30.8/arch/x86/include/asm/checksum_32.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/checksum_32.h 2009-07-30 09:48:09.923412137 -0400
+@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
+ /*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
+ int *err_ptr)
+ {
+ might_sleep();
+- return csum_partial_copy_generic((__force void *)src, dst,
++ return csum_partial_copy_generic_from_user((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ }
+
+@@ -177,7 +185,7 @@ static inline __wsum csum_and_copy_to_us
+ {
+ might_sleep();
+ if (access_ok(VERIFY_WRITE, dst, len))
+- return csum_partial_copy_generic(src, (__force void *)dst,
++ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+
+ if (len)
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/desc.h linux-2.6.30.8/arch/x86/include/asm/desc.h
+--- linux-2.6.30.8/arch/x86/include/asm/desc.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/desc.h 2009-07-30 09:48:09.923412137 -0400
+@@ -16,6 +16,7 @@ static inline void fill_ldt(struct desc_
+ desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
+ desc->type = (info->read_exec_only ^ 1) << 1;
+ desc->type |= info->contents << 2;
++ desc->type |= info->seg_not_present ^ 1;
+ desc->s = 1;
+ desc->dpl = 0x3;
+ desc->p = info->seg_not_present ^ 1;
+@@ -32,16 +33,12 @@ static inline void fill_ldt(struct desc_
+ }
+
+ extern struct desc_ptr idt_descr;
+-extern gate_desc idt_table[];
+-
+-struct gdt_page {
+- struct desc_struct gdt[GDT_ENTRIES];
+-} __attribute__((aligned(PAGE_SIZE)));
+-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
++extern gate_desc idt_table[256];
+
++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+- return per_cpu(gdt_page, cpu).gdt;
++ return cpu_gdt_table[cpu];
+ }
+
+ #ifdef CONFIG_X86_64
+@@ -116,19 +113,48 @@ static inline void paravirt_free_ldt(str
+ static inline void native_write_idt_entry(gate_desc *idt, int entry,
+ const gate_desc *gate)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++
++ pax_open_kernel(cr0);
++#endif
++
+ memcpy(&idt[entry], gate, sizeof(*gate));
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ }
+
+ static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
+ const void *desc)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++
++ pax_open_kernel(cr0);
++#endif
++
+ memcpy(&ldt[entry], desc, 8);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ }
+
+ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
+ const void *desc, int type)
+ {
+ unsigned int size;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ switch (type) {
+ case DESC_TSS:
+ size = sizeof(tss_desc);
+@@ -140,7 +166,17 @@ static inline void native_write_gdt_entr
+ size = sizeof(struct desc_struct);
+ break;
+ }
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ memcpy(&gdt[entry], desc, size);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ }
+
+ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
+@@ -212,7 +248,19 @@ static inline void native_set_ldt(const
+
+ static inline void native_load_tr_desc(void)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++
++ pax_open_kernel(cr0);
++#endif
++
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ }
+
+ static inline void native_load_gdt(const struct desc_ptr *dtr)
+@@ -247,8 +295,19 @@ static inline void native_load_tls(struc
+ unsigned int i;
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++
++ pax_open_kernel(cr0);
++#endif
++
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ }
+
+ #define _LDT_empty(info) \
+@@ -380,6 +439,18 @@ static inline void set_system_intr_gate_
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+ }
+
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
++{
++ struct desc_struct d;
++
++ if (likely(limit))
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++ pack_descriptor(&d, base, limit, 0xFB, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
++}
++#endif
++
+ #else
+ /*
+ * GET_DESC_BASE reads the descriptor base of the specified segment.
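
Every pax_open_kernel()/pax_close_kernel() pair above brackets a write
to a structure that KERNEXEC otherwise keeps read-only (IDT, GDT, LDT,
page tables) by clearing and restoring CR0.WP. The same
open/write/close discipline, sketched in user space with mprotect()
standing in for the CR0 toggle (demo-only names):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char demo_table[4096] __attribute__((aligned(4096)));

static void demo_write(const char *msg)
{
        mprotect(demo_table, sizeof(demo_table),
                 PROT_READ | PROT_WRITE);                     /* "open" */
        strcpy(demo_table, msg);
        mprotect(demo_table, sizeof(demo_table), PROT_READ);  /* "close" */
}

int main(void)
{
        if (mprotect(demo_table, sizeof(demo_table), PROT_READ))
                perror("mprotect");
        demo_write("written while briefly writable");
        printf("%s\n", demo_table);
        return 0;
}
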
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/e820.h linux-2.6.30.8/arch/x86/include/asm/e820.h
+--- linux-2.6.30.8/arch/x86/include/asm/e820.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/e820.h 2009-07-30 09:48:09.924429298 -0400
+@@ -135,7 +135,7 @@ extern char *memory_setup(void);
+ #define ISA_END_ADDRESS 0x100000
+ #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
+
+-#define BIOS_BEGIN 0x000a0000
++#define BIOS_BEGIN 0x000c0000
+ #define BIOS_END 0x00100000
+
+ #ifdef __KERNEL__
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/elf.h linux-2.6.30.8/arch/x86/include/asm/elf.h
+--- linux-2.6.30.8/arch/x86/include/asm/elf.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/elf.h 2009-07-30 09:48:09.927602624 -0400
+@@ -263,7 +263,25 @@ extern int force_personality32;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#ifdef CONFIG_X86_32
++#define PAX_ELF_ET_DYN_BASE 0x10000000UL
++
++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#else
++#define PAX_ELF_ET_DYN_BASE 0x400000UL
++
++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : 32)
++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : 32)
++#endif
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+@@ -315,8 +333,7 @@ do { \
+ #define ARCH_DLINFO \
+ do { \
+ if (vdso_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+- (unsigned long)current->mm->context.vdso); \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
+ } while (0)
+
+ #define AT_SYSINFO 32
+@@ -327,7 +344,7 @@ do { \
+
+ #endif /* !CONFIG_X86_32 */
+
+-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
++#define VDSO_CURRENT_BASE (current->mm->context.vdso)
+
+ #define VDSO_ENTRY \
+ ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+@@ -341,7 +358,4 @@ extern int arch_setup_additional_pages(s
+ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
+ #define compat_arch_setup_additional_pages syscall32_setup_pages
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* _ASM_X86_ELF_H */
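
The PAX_DELTA_*_LEN values above are entropy widths in bits: that many
random bits are shifted up by PAGE_SHIFT and added to the mmap or
stack base. A sketch of that arithmetic with assumed constants (16
bits, 4 KiB pages, the i386 ET_DYN base from above):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT 12
#define DELTA_MMAP_LEN 16

int main(void)
{
        unsigned long base = 0x10000000UL;   /* PAX_ELF_ET_DYN_BASE, i386 */
        unsigned long delta;

        srandom(time(NULL));
        delta = ((unsigned long)random() & ((1UL << DELTA_MMAP_LEN) - 1))
                        << PAGE_SHIFT;
        printf("randomized base: %#lx\n", base + delta);
        return 0;
}
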
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/futex.h linux-2.6.30.8/arch/x86/include/asm/futex.h
+--- linux-2.6.30.8/arch/x86/include/asm/futex.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/futex.h 2009-07-30 09:48:09.927602624 -0400
+@@ -11,6 +11,40 @@
+ #include <asm/processor.h>
+ #include <asm/system.h>
+
++#ifdef CONFIG_X86_32
++#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
++ asm volatile( \
++ "movw\t%w6, %%ds\n" \
++ "1:\t" insn "\n" \
++ "2:\tpushl\t%%ss\n" \
++ "\tpopl\t%%ds\n" \
++ "\t.section .fixup,\"ax\"\n" \
++ "3:\tmov\t%3, %1\n" \
++ "\tjmp\t2b\n" \
++ "\t.previous\n" \
++ _ASM_EXTABLE(1b, 3b) \
++ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
++ : "i" (-EFAULT), "0" (oparg), "1" (0), "r" (__USER_DS))
++
++#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
++ asm volatile("movw\t%w7, %%es\n" \
++ "1:\tmovl\t%%es:%2, %0\n" \
++ "\tmovl\t%0, %3\n" \
++ "\t" insn "\n" \
++ "2:\t" LOCK_PREFIX "cmpxchgl %3, %%es:%2\n"\
++ "\tjnz\t1b\n" \
++ "3:\tpushl\t%%ss\n" \
++ "\tpopl\t%%es\n" \
++ "\t.section .fixup,\"ax\"\n" \
++ "4:\tmov\t%5, %1\n" \
++ "\tjmp\t3b\n" \
++ "\t.previous\n" \
++ _ASM_EXTABLE(1b, 4b) \
++ _ASM_EXTABLE(2b, 4b) \
++ : "=&a" (oldval), "=&r" (ret), \
++ "+m" (*uaddr), "=&r" (tem) \
++ : "r" (oparg), "i" (-EFAULT), "1" (0), "r" (__USER_DS))
++#else
+ #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
+ asm volatile("1:\t" insn "\n" \
+ "2:\t.section .fixup,\"ax\"\n" \
+@@ -36,8 +70,9 @@
+ : "=&a" (oldval), "=&r" (ret), \
+ "+m" (*uaddr), "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
++#endif
+
+-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
++static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+ {
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+@@ -61,11 +96,20 @@ static inline int futex_atomic_op_inuser
+
+ switch (op) {
+ case FUTEX_OP_SET:
++#ifdef CONFIG_X86_32
++ __futex_atomic_op1("xchgl %0, %%ds:%2", ret, oldval, uaddr, oparg);
++#else
+ __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
++#endif
+ break;
+ case FUTEX_OP_ADD:
++#ifdef CONFIG_X86_32
++ __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %%ds:%2", ret, oldval,
++ uaddr, oparg);
++#else
+ __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
+ uaddr, oparg);
++#endif
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
+@@ -109,7 +153,7 @@ static inline int futex_atomic_op_inuser
+ return ret;
+ }
+
+-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
++static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
+ int newval)
+ {
+
+@@ -122,14 +166,27 @@ static inline int futex_atomic_cmpxchg_i
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
++ asm volatile(
++#ifdef CONFIG_X86_32
++ "\tmovw %w5, %%ds\n"
++ "1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
++ "2:\tpushl %%ss\n"
++ "\tpopl %%ds\n"
++ "\t.section .fixup, \"ax\"\n"
++#else
++ "1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
+ "2:\t.section .fixup, \"ax\"\n"
++#endif
+ "3:\tmov %2, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : "=a" (oldval), "+m" (*uaddr)
++#ifdef CONFIG_X86_32
++ : "i" (-EFAULT), "r" (newval), "0" (oldval), "r" (__USER_DS)
++#else
+ : "i" (-EFAULT), "r" (newval), "0" (oldval)
++#endif
+ : "memory"
+ );
+
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/i387.h linux-2.6.30.8/arch/x86/include/asm/i387.h
+--- linux-2.6.30.8/arch/x86/include/asm/i387.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/i387.h 2009-07-30 09:48:09.927602624 -0400
+@@ -203,13 +203,8 @@ static inline void restore_fpu(struct ta
+ }
+
+ /* We need a safe address that is cheap to find and that is already
+- in L1 during context switch. The best choices are unfortunately
+- different for UP and SMP */
+-#ifdef CONFIG_SMP
+-#define safe_address (__per_cpu_offset[0])
+-#else
+-#define safe_address (kstat_cpu(0).cpustat.user)
+-#endif
++ in L1 during context switch. */
++#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
+
+ /*
+ * These must be called with preempt disabled
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/io_64.h linux-2.6.30.8/arch/x86/include/asm/io_64.h
+--- linux-2.6.30.8/arch/x86/include/asm/io_64.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/io_64.h 2009-07-30 09:48:09.927602624 -0400
+@@ -140,6 +140,17 @@ __OUTS(l)
+
+ #include <linux/vmalloc.h>
+
++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
++static inline int valid_phys_addr_range (unsigned long addr, size_t count)
++{
++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
++static inline int valid_mmap_phys_addr_range (unsigned long pfn, size_t count)
++{
++ return (pfn + (count >> PAGE_SHIFT)) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
+ #include <asm-generic/iomap.h>
+
+ void __memcpy_fromio(void *, unsigned long, unsigned);
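
The two helpers added above bound /dev/mem style accesses by the CPU's
physical address width: a range is valid only if its last page still
fits under 1 << x86_phys_bits. The same check with the CPUID-derived
width replaced by an assumed constant:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PHYS_BITS  46   /* stand-in for boot_cpu_data.x86_phys_bits */

static int demo_valid_phys_addr_range(unsigned long addr, size_t count)
{
        return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT)
                        < (1UL << (PHYS_BITS - PAGE_SHIFT));
}

int main(void)
{
        printf("%d\n", demo_valid_phys_addr_range(0x1000, 4096));      /* 1 */
        printf("%d\n", demo_valid_phys_addr_range(~0UL - 8191, 4096)); /* 0 */
        return 0;
}
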
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/irqflags.h linux-2.6.30.8/arch/x86/include/asm/irqflags.h
+--- linux-2.6.30.8/arch/x86/include/asm/irqflags.h 2009-08-24 20:46:56.210924412 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/irqflags.h 2009-08-24 21:08:41.524545551 -0400
+@@ -147,6 +147,8 @@ static inline unsigned long __raw_local_
+ #define INTERRUPT_RETURN iret
+ #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+ #define GET_CR0_INTO_EAX movl %cr0, %eax
++#define GET_CR0_INTO_EDX movl %cr0, %edx
++#define SET_CR0_FROM_EDX movl %edx, %cr0
+ #endif
+
+
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/kmap_types.h linux-2.6.30.8/arch/x86/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/x86/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/kmap_types.h 2009-07-30 09:48:09.928623877 -0400
+@@ -21,7 +21,8 @@ D(9) KM_IRQ0,
+ D(10) KM_IRQ1,
+ D(11) KM_SOFTIRQ0,
+ D(12) KM_SOFTIRQ1,
+-D(13) KM_TYPE_NR
++D(13) KM_CLEARPAGE,
++D(14) KM_TYPE_NR
+ };
+
+ #undef D
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/kvm_host.h linux-2.6.30.8/arch/x86/include/asm/kvm_host.h
+--- linux-2.6.30.8/arch/x86/include/asm/kvm_host.h 2009-09-09 17:37:32.538127379 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/kvm_host.h 2009-09-09 17:37:48.994379464 -0400
+@@ -532,7 +532,7 @@ struct kvm_x86_ops {
+ int (*get_mt_mask_shift)(void);
+ };
+
+-extern struct kvm_x86_ops *kvm_x86_ops;
++extern const struct kvm_x86_ops *kvm_x86_ops;
+
+ int kvm_mmu_module_init(void);
+ void kvm_mmu_module_exit(void);
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/local.h linux-2.6.30.8/arch/x86/include/asm/local.h
+--- linux-2.6.30.8/arch/x86/include/asm/local.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/local.h 2009-07-30 09:48:09.929617473 -0400
+@@ -18,26 +18,90 @@ typedef struct {
+
+ static inline void local_inc(local_t *l)
+ {
+- asm volatile(_ASM_INC "%0"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_DEC "%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_dec(local_t *l)
+ {
+- asm volatile(_ASM_DEC "%0"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_INC "%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_add(long i, local_t *l)
+ {
+- asm volatile(_ASM_ADD "%1,%0"
++ asm volatile(_ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_SUB "%1,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+
+ static inline void local_sub(long i, local_t *l)
+ {
+- asm volatile(_ASM_SUB "%1,%0"
++ asm volatile(_ASM_SUB "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_ADD "%1,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+@@ -55,7 +119,24 @@ static inline int local_sub_and_test(lon
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_SUB "%2,%0; sete %1"
++ asm volatile(_ASM_SUB "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_ADD "%2,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -73,7 +154,24 @@ static inline int local_dec_and_test(loc
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_DEC "%0; sete %1"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_INC "%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -91,7 +189,24 @@ static inline int local_inc_and_test(loc
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_INC "%0; sete %1"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_DEC "%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -110,7 +225,24 @@ static inline int local_add_negative(lon
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_ADD "%2,%0; sets %1"
++ asm volatile(_ASM_ADD "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_SUB "%2,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sets %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -133,7 +265,23 @@ static inline long local_add_return(long
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
+- asm volatile(_ASM_XADD "%0, %1;"
++ asm volatile(_ASM_XADD "%0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_MOV "%0,%1\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+r" (i), "+m" (l->a.counter)
+ : : "memory");
+ return i + __i;
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/mman.h linux-2.6.30.8/arch/x86/include/asm/mman.h
+--- linux-2.6.30.8/arch/x86/include/asm/mman.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/mman.h 2009-07-30 09:48:09.929617473 -0400
+@@ -17,4 +17,14 @@
+ #define MCL_CURRENT 1 /* lock all current mappings */
+ #define MCL_FUTURE 2 /* lock all future mappings */
+
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_32
++#define arch_mmap_check i386_mmap_check
++int i386_mmap_check(unsigned long addr, unsigned long len,
++ unsigned long flags);
++#endif
++#endif
++#endif
++
+ #endif /* _ASM_X86_MMAN_H */
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/mmu_context.h linux-2.6.30.8/arch/x86/include/asm/mmu_context.h
+--- linux-2.6.30.8/arch/x86/include/asm/mmu_context.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/mmu_context.h 2009-07-30 09:48:09.929617473 -0400
+@@ -34,11 +34,17 @@ static inline void switch_mm(struct mm_s
+ struct task_struct *tsk)
+ {
+ unsigned cpu = smp_processor_id();
++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
++ int tlbstate = TLBSTATE_OK;
++#endif
+
+ if (likely(prev != next)) {
+ /* stop flush ipis for the previous mm */
+ cpu_clear(cpu, prev->cpu_vm_mask);
+ #ifdef CONFIG_SMP
++#ifdef CONFIG_X86_32
++ tlbstate = percpu_read(cpu_tlbstate.state);
++#endif
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ percpu_write(cpu_tlbstate.active_mm, next);
+ #endif
+@@ -52,6 +58,26 @@ static inline void switch_mm(struct mm_s
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+ load_LDT_nolock(&next->context);
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ if (!nx_enabled) {
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
++ smp_mb__after_clear_bit();
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++ }
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
++ prev->context.user_cs_limit != next->context.user_cs_limit
++#ifdef CONFIG_SMP
++ || tlbstate != TLBSTATE_OK
++#endif
++ ))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++
+ }
+ #ifdef CONFIG_SMP
+ else {
+@@ -65,6 +91,19 @@ static inline void switch_mm(struct mm_s
+ */
+ load_cr3(next->pgd);
+ load_LDT_nolock(&next->context);
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!nx_enabled)
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
++#endif
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++
+ }
+ }
+ #endif
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/mmu.h linux-2.6.30.8/arch/x86/include/asm/mmu.h
+--- linux-2.6.30.8/arch/x86/include/asm/mmu.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/mmu.h 2009-07-30 09:48:09.929617473 -0400
+@@ -9,10 +9,23 @@
+ * we put the segment information here.
+ */
+ typedef struct {
+- void *ldt;
++ struct desc_struct *ldt;
+ int size;
+ struct mutex lock;
+- void *vdso;
++ unsigned long vdso;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ unsigned long user_cs_base;
++ unsigned long user_cs_limit;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpumask_t cpu_user_cs_mask;
++#endif
++
++#endif
++#endif
++
+ } mm_context_t;
+
+ #ifdef CONFIG_SMP
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/module.h linux-2.6.30.8/arch/x86/include/asm/module.h
+--- linux-2.6.30.8/arch/x86/include/asm/module.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/module.h 2009-07-30 11:10:48.877547128 -0400
+@@ -74,7 +74,12 @@ struct mod_arch_specific {};
+ # else
+ # define MODULE_STACKSIZE ""
+ # endif
+-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
++# ifdef CONFIG_GRKERNSEC
++# define MODULE_GRSEC "GRSECURITY "
++# else
++# define MODULE_GRSEC ""
++# endif
++# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC
+ #endif
+
+ #endif /* _ASM_X86_MODULE_H */
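
MODULE_GRSEC above is folded into the vermagic string by plain
adjacent-literal concatenation, so a module built without
CONFIG_GRKERNSEC fails the load-time vermagic comparison against a
grsec kernel. The mechanism in miniature (component values made up):

#include <stdio.h>
#include <string.h>

#define MODULE_PROC_FAMILY "586 "
#define MODULE_STACKSIZE   "4KSTACKS "
#define MODULE_GRSEC       "GRSECURITY "
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC

int main(void)
{
        const char *kernel = MODULE_ARCH_VERMAGIC;
        const char *module = MODULE_PROC_FAMILY MODULE_STACKSIZE; /* no grsec */

        printf("vermagic match: %d\n", strcmp(kernel, module) == 0); /* 0 */
        return 0;
}
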
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/page_32_types.h linux-2.6.30.8/arch/x86/include/asm/page_32_types.h
+--- linux-2.6.30.8/arch/x86/include/asm/page_32_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/page_32_types.h 2009-07-30 09:48:09.930625879 -0400
+@@ -15,6 +15,23 @@
+ */
+ #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifndef __ASSEMBLY__
++extern unsigned char MODULES_VADDR[];
++extern unsigned char MODULES_END[];
++extern unsigned char KERNEL_TEXT_OFFSET[];
++#define ktla_ktva(addr) (addr + (unsigned long)KERNEL_TEXT_OFFSET)
++#define ktva_ktla(addr) (addr - (unsigned long)KERNEL_TEXT_OFFSET)
++#endif
++#else
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define CONFIG_ARCH_TRACK_EXEC_LIMIT 1
++#endif
++
+ #ifdef CONFIG_4KSTACKS
+ #define THREAD_ORDER 0
+ #else
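
ktla_ktva()/ktva_ktla() above convert between the address the kernel
text was linked at and the address it actually runs at under KERNEXEC,
a fixed offset supplied by the linker script. A toy round-trip with an
assumed offset:

#include <stdio.h>

#define DEMO_TEXT_OFFSET 0x100000UL   /* KERNEL_TEXT_OFFSET stand-in */
#define ktla_ktva(addr) ((addr) + DEMO_TEXT_OFFSET)
#define ktva_ktla(addr) ((addr) - DEMO_TEXT_OFFSET)

int main(void)
{
        unsigned long linked = 0xc1000000UL;

        printf("runtime=%#lx round-trip=%#lx\n",
               ktla_ktva(linked), ktva_ktla(ktla_ktva(linked)));
        return 0;
}
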
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/page_64_types.h linux-2.6.30.8/arch/x86/include/asm/page_64_types.h
+--- linux-2.6.30.8/arch/x86/include/asm/page_64_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/page_64_types.h 2009-07-30 09:48:09.930625879 -0400
+@@ -47,6 +47,9 @@
+ #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
+ #define __START_KERNEL_map _AC(0xffffffff80000000, UL)
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ /* See Documentation/x86_64/mm.txt for a description of the memory map. */
+ #define __PHYSICAL_MASK_SHIFT 46
+ #define __VIRTUAL_MASK_SHIFT 48
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/paravirt.h linux-2.6.30.8/arch/x86/include/asm/paravirt.h
+--- linux-2.6.30.8/arch/x86/include/asm/paravirt.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/paravirt.h 2009-07-30 09:48:09.931536832 -0400
+@@ -1688,7 +1688,7 @@ static inline unsigned long __raw_local_
+
+ #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
+ #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+-#define PARA_INDIRECT(addr) *%cs:addr
++#define PARA_INDIRECT(addr) *%ss:addr
+ #endif
+
+ #define INTERRUPT_RETURN \
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/pgalloc.h linux-2.6.30.8/arch/x86/include/asm/pgalloc.h
+--- linux-2.6.30.8/arch/x86/include/asm/pgalloc.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/pgalloc.h 2009-09-05 22:09:36.037016937 -0400
+@@ -52,6 +52,13 @@ static inline void pmd_populate_kernel(s
+ pmd_t *pmd, pte_t *pte)
+ {
+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
++}
++
++static inline void pmd_populate_user(struct mm_struct *mm,
++ pmd_t *pmd, pte_t *pte)
++{
++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+ set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+ }
+
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/pgtable-2level.h linux-2.6.30.8/arch/x86/include/asm/pgtable-2level.h
+--- linux-2.6.30.8/arch/x86/include/asm/pgtable-2level.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/pgtable-2level.h 2009-07-30 09:48:09.931536832 -0400
+@@ -18,7 +18,19 @@ static inline void native_set_pte(pte_t
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++
++ pax_open_kernel(cr0);
++#endif
++
+ *pmdp = pmd;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ }
+
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/pgtable_32.h linux-2.6.30.8/arch/x86/include/asm/pgtable_32.h
+--- linux-2.6.30.8/arch/x86/include/asm/pgtable_32.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/pgtable_32.h 2009-09-05 22:09:36.076613109 -0400
+@@ -26,8 +26,6 @@
+ struct mm_struct;
+ struct vm_area_struct;
+
+-extern pgd_t swapper_pg_dir[1024];
+-
+ static inline void pgtable_cache_init(void) { }
+ static inline void check_pgt_cache(void) { }
+ void paging_init(void);
+@@ -48,6 +46,11 @@ extern void set_pmd_pfn(unsigned long, u
+ # include <asm/pgtable-2level.h>
+ #endif
+
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
++#ifdef CONFIG_X86_PAE
++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
++#endif
++
+ #if defined(CONFIG_HIGHPTE)
+ #define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \
+@@ -80,6 +83,9 @@ do { \
+
+ #endif /* !__ASSEMBLY__ */
+
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
+ /*
+ * kern_addr_valid() is (1) for FLATMEM and (0) for
+ * SPARSEMEM and DISCONTIGMEM
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/pgtable-3level.h linux-2.6.30.8/arch/x86/include/asm/pgtable-3level.h
+--- linux-2.6.30.8/arch/x86/include/asm/pgtable-3level.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/pgtable-3level.h 2009-07-30 09:48:09.931536832 -0400
+@@ -38,12 +38,36 @@ static inline void native_set_pte_atomic
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++
++ pax_open_kernel(cr0);
++#endif
++
+ set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ }
+
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++
++ pax_open_kernel(cr0);
++#endif
++
+ set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ }
+
+ /*
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/pgtable_64.h linux-2.6.30.8/arch/x86/include/asm/pgtable_64.h
+--- linux-2.6.30.8/arch/x86/include/asm/pgtable_64.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/pgtable_64.h 2009-09-05 22:09:36.083864233 -0400
+@@ -16,9 +16,11 @@
+
+ extern pud_t level3_kernel_pgt[512];
+ extern pud_t level3_ident_pgt[512];
++extern pud_t level3_vmalloc_pgt[512];
++extern pud_t level3_vmemmap_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pmd_t level2_fixmap_pgt[512];
+-extern pmd_t level2_ident_pgt[512];
++extern pmd_t level2_ident_pgt[512*4];
+ extern pgd_t init_level4_pgt[];
+
+ #define swapper_pg_dir init_level4_pgt
+@@ -78,7 +80,19 @@ static inline pte_t native_ptep_get_and_
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++
++ pax_open_kernel(cr0);
++#endif
++
+ *pmdp = pmd;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ }
+
+ static inline void native_pmd_clear(pmd_t *pmd)
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/pgtable.h linux-2.6.30.8/arch/x86/include/asm/pgtable.h
+--- linux-2.6.30.8/arch/x86/include/asm/pgtable.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/pgtable.h 2009-07-30 09:48:09.932929020 -0400
+@@ -87,6 +87,11 @@ static inline void __init paravirt_paget
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
++static inline int pte_user(pte_t pte)
++{
++ return pte_val(pte) & _PAGE_USER;
++}
++
+ static inline int pte_dirty(pte_t pte)
+ {
+ return pte_flags(pte) & _PAGE_DIRTY;
+@@ -169,9 +174,29 @@ static inline pte_t pte_wrprotect(pte_t
+ return pte_clear_flags(pte, _PAGE_RW);
+ }
+
++static inline pte_t pte_mkread(pte_t pte)
++{
++ return __pte(pte_val(pte) | _PAGE_USER);
++}
++
+ static inline pte_t pte_mkexec(pte_t pte)
+ {
+- return pte_clear_flags(pte, _PAGE_NX);
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_clear_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_set_flags(pte, _PAGE_USER);
++}
++
++static inline pte_t pte_exprotect(pte_t pte)
++{
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_set_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_clear_flags(pte, _PAGE_USER);
+ }
+
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -467,7 +492,7 @@ static inline pud_t *pud_offset(pgd_t *p
+
+ static inline int pgd_bad(pgd_t pgd)
+ {
+- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
+ }
+
+ static inline int pgd_none(pgd_t pgd)
+@@ -606,7 +631,19 @@ static inline void ptep_set_wrprotect(st
+ */
+ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
+ {
+- memcpy(dst, src, count * sizeof(pgd_t));
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++
++ pax_open_kernel(cr0);
++#endif
++
++ memcpy(dst, src, count * sizeof(pgd_t));
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ }
+
+
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/pgtable_types.h linux-2.6.30.8/arch/x86/include/asm/pgtable_types.h
+--- linux-2.6.30.8/arch/x86/include/asm/pgtable_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/pgtable_types.h 2009-07-30 19:56:23.227966500 -0400
+@@ -16,12 +16,11 @@
+ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+ #define _PAGE_BIT_PAT 7 /* on 4KB pages */
+ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
+ #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
+ #define _PAGE_BIT_UNUSED3 11
+ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
+ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+ /* If _PAGE_BIT_PRESENT is clear, we use these: */
+@@ -39,7 +38,6 @@
+ #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+ #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+ #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+ #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+ #define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
+ #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+@@ -51,7 +49,7 @@
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+ #else
+-#define _PAGE_NX (_AT(pteval_t, 0))
++#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
+ #endif
+
+ #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+@@ -88,6 +86,9 @@
+ #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
+
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++#define PAGE_SHARED_NOEXEC PAGE_SHARED
++
+ #define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+ #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
+@@ -98,8 +99,8 @@
+ #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
+ #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
+-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
+-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
++#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
+ #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+@@ -158,8 +159,8 @@
+ * bits are combined, this will allow the user to access the high address mapped
+ * VDSO in the presence of CONFIG_COMPAT_VDSO
+ */
+-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
+ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+ #endif
+
+@@ -272,7 +273,11 @@ static inline pteval_t pte_flags(pte_t p
+ typedef struct page *pgtable_t;
+
+ extern pteval_t __supported_pte_mask;
++#ifdef CONFIG_X86_32
+ extern int nx_enabled;
++#else
++#define nx_enabled (1)
++#endif
+ extern void set_nx(void);
+
+ #define pgprot_writecombine pgprot_writecombine
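
On 32-bit non-PAE there is no hardware NX bit, so the hunk above
repurposes software PTE bit 11 (_PAGE_BIT_UNUSED3) for it; PAGEEXEC
can then track executability uniformly and enforce it by other means.
The bit bookkeeping in isolation (types and values are demo stand-ins):

#include <stdio.h>

typedef unsigned long pteval_t;

#define _PAGE_BIT_UNUSED3 11
#define _PAGE_NX ((pteval_t)1 << _PAGE_BIT_UNUSED3)  /* software NX */

int main(void)
{
        pteval_t pte = 0x063 | _PAGE_NX;  /* PRESENT+RW+DIRTY+ACCESSED, NX */

        printf("nx: %lu\n", (unsigned long)((pte & _PAGE_NX) != 0));
        return 0;
}
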
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/processor.h linux-2.6.30.8/arch/x86/include/asm/processor.h
+--- linux-2.6.30.8/arch/x86/include/asm/processor.h 2009-09-26 23:07:15.350360380 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/processor.h 2009-09-26 23:07:26.411879783 -0400
+@@ -270,7 +270,7 @@ struct tss_struct {
+
+ } ____cacheline_aligned;
+
+-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
++extern struct tss_struct init_tss[NR_CPUS];
+
+ /*
+ * Save the original ist values for checking stack pointers during debugging
+@@ -876,8 +876,17 @@ static inline void spin_lock_prefetch(co
+ */
+ #define TASK_SIZE PAGE_OFFSET
+ #define TASK_SIZE_MAX TASK_SIZE
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
++#else
+ #define STACK_TOP TASK_SIZE
+-#define STACK_TOP_MAX STACK_TOP
++#endif
++#define STACK_TOP_MAX TASK_SIZE
+
+ #define INIT_THREAD { \
+ .sp0 = sizeof(init_stack) + (long)&init_stack, \
+@@ -895,7 +904,7 @@ static inline void spin_lock_prefetch(co
+ */
+ #define INIT_TSS { \
+ .x86_tss = { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .ss0 = __KERNEL_DS, \
+ .ss1 = __KERNEL_CS, \
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+@@ -906,11 +915,7 @@ static inline void spin_lock_prefetch(co
+ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+ #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
+-#define KSTK_TOP(info) \
+-({ \
+- unsigned long *__ptr = (unsigned long *)(info); \
+- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
+-})
++#define KSTK_TOP(info) ((info)->task.thread.sp0)
+
+ /*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+@@ -925,7 +930,7 @@ extern unsigned long thread_saved_pc(str
+ #define task_pt_regs(task) \
+ ({ \
+ struct pt_regs *__regs__; \
+- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
+ __regs__ - 1; \
+ })
+
+@@ -941,7 +946,7 @@ extern unsigned long thread_saved_pc(str
+ * space during mmap's.
+ */
+ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
+- 0xc0000000 : 0xFFFFe000)
++ 0xc0000000 : 0xFFFFf000)
+
+ #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
+ IA32_PAGE_OFFSET : TASK_SIZE_MAX)
+@@ -978,6 +983,10 @@ extern void start_thread(struct pt_regs
+ */
+ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
++#endif
++
+ #define KSTK_EIP(task) (task_pt_regs(task)->ip)
+
+ /* Get/set a process' ability to use the timestamp counter instruction */
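
SEGMEXEC_TASK_SIZE above halves the 3 GB user address space: the lower
half holds the normal data mapping, the upper half an execute-only
mirror enforced through the CS segment limit. The resulting layout,
computed with the usual 32-bit PAGE_OFFSET:

#include <stdio.h>

#define TASK_SIZE          0xc0000000UL   /* PAGE_OFFSET assumed 3 GB */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)

int main(void)
{
        printf("data half:   0x0 - %#lx\n", SEGMEXEC_TASK_SIZE);
        printf("exec mirror: %#lx - %#lx\n", SEGMEXEC_TASK_SIZE, TASK_SIZE);
        return 0;
}
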
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/ptrace.h linux-2.6.30.8/arch/x86/include/asm/ptrace.h
+--- linux-2.6.30.8/arch/x86/include/asm/ptrace.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/ptrace.h 2009-07-30 09:48:09.933533479 -0400
+@@ -151,28 +151,29 @@ static inline unsigned long regs_return_
+ }
+
+ /*
+- * user_mode_vm(regs) determines whether a register set came from user mode.
++ * user_mode(regs) determines whether a register set came from user mode.
+ * This is true if V8086 mode was enabled OR if the register set was from
+ * protected mode with RPL-3 CS value. This tricky test checks that with
+ * one comparison. Many places in the kernel can bypass this full check
+- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
++ * be used.
+ */
+-static inline int user_mode(struct pt_regs *regs)
++static inline int user_mode_novm(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+ #else
+- return !!(regs->cs & 3);
++ return !!(regs->cs & SEGMENT_RPL_MASK);
+ #endif
+ }
+
+-static inline int user_mode_vm(struct pt_regs *regs)
++static inline int user_mode(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
+ USER_RPL;
+ #else
+- return user_mode(regs);
++ return user_mode_novm(regs);
+ #endif
+ }
+
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/reboot.h linux-2.6.30.8/arch/x86/include/asm/reboot.h
+--- linux-2.6.30.8/arch/x86/include/asm/reboot.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/reboot.h 2009-07-30 09:48:09.933533479 -0400
+@@ -18,7 +18,7 @@ extern struct machine_ops machine_ops;
+
+ void native_machine_crash_shutdown(struct pt_regs *regs);
+ void native_machine_shutdown(void);
+-void machine_real_restart(const unsigned char *code, int length);
++void machine_real_restart(const unsigned char *code, unsigned int length);
+
+ typedef void (*nmi_shootdown_cb)(int, struct die_args*);
+ void nmi_shootdown_cpus(nmi_shootdown_cb callback);
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/rwsem.h linux-2.6.30.8/arch/x86/include/asm/rwsem.h
+--- linux-2.6.30.8/arch/x86/include/asm/rwsem.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/rwsem.h 2009-07-30 09:48:09.934667198 -0400
+@@ -106,10 +106,26 @@ static inline void __down_read(struct rw
+ {
+ asm volatile("# beginning down_read\n\t"
+ LOCK_PREFIX " incl (%%eax)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "decl (%%eax)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* adds 0x00000001, returns the old value */
+- " jns 1f\n"
++ " jns 2f\n"
+ " call call_rwsem_down_read_failed\n"
+- "1:\n\t"
++ "2:\n\t"
+ "# ending down_read\n\t"
+ : "+m" (sem->count)
+ : "a" (sem)
+@@ -124,13 +140,29 @@ static inline int __down_read_trylock(st
+ __s32 result, tmp;
+ asm volatile("# beginning __down_read_trylock\n\t"
+ " movl %0,%1\n\t"
+- "1:\n\t"
++ "2:\n\t"
+ " movl %1,%2\n\t"
+ " addl %3,%2\n\t"
+- " jle 2f\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "subl %3,%2\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ " jle 3f\n\t"
+ LOCK_PREFIX " cmpxchgl %2,%0\n\t"
+- " jnz 1b\n\t"
+- "2:\n\t"
++ " jnz 2b\n\t"
++ "3:\n\t"
+ "# ending __down_read_trylock\n\t"
+ : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+ : "i" (RWSEM_ACTIVE_READ_BIAS)
+@@ -148,12 +180,28 @@ static inline void __down_write_nested(s
+ tmp = RWSEM_ACTIVE_WRITE_BIAS;
+ asm volatile("# beginning down_write\n\t"
+ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "movl %%edx,(%%eax)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* subtract 0x0000ffff, returns the old value */
+ " testl %%edx,%%edx\n\t"
+ /* was the count 0 before? */
+- " jz 1f\n"
++ " jz 2f\n"
+ " call call_rwsem_down_write_failed\n"
+- "1:\n"
++ "2:\n"
+ "# ending down_write"
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (tmp)
+@@ -186,10 +234,26 @@ static inline void __up_read(struct rw_s
+ __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
+ asm volatile("# beginning __up_read\n\t"
+ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "movl %%edx,(%%eax)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* subtracts 1, returns the old value */
+- " jns 1f\n\t"
++ " jns 2f\n\t"
+ " call call_rwsem_wake\n"
+- "1:\n"
++ "2:\n"
+ "# ending __up_read\n"
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (tmp)
+@@ -204,11 +268,27 @@ static inline void __up_write(struct rw_
+ asm volatile("# beginning __up_write\n\t"
+ " movl %2,%%edx\n\t"
+ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "movl %%edx,(%%eax)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* tries to transition
+ 0xffff0001 -> 0x00000000 */
+- " jz 1f\n"
++ " jz 2f\n"
+ " call call_rwsem_wake\n"
+- "1:\n\t"
++ "2:\n\t"
+ "# ending __up_write\n"
+ : "+m" (sem->count)
+ : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
+@@ -222,10 +302,26 @@ static inline void __downgrade_write(str
+ {
+ asm volatile("# beginning __downgrade_write\n\t"
+ LOCK_PREFIX " addl %2,(%%eax)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "subl %2,(%%eax)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+- " jns 1f\n\t"
++ " jns 2f\n\t"
+ " call call_rwsem_downgrade_wake\n"
+- "1:\n\t"
++ "2:\n\t"
+ "# ending __downgrade_write\n"
+ : "+m" (sem->count)
+ : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
+@@ -237,7 +333,23 @@ static inline void __downgrade_write(str
+ */
+ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (sem->count)
+ : "ir" (delta));
+ }
+@@ -249,7 +361,23 @@ static inline int rwsem_atomic_update(in
+ {
+ int tmp = delta;
+
+- asm volatile(LOCK_PREFIX "xadd %0,%1"
++ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "movl %0,%1\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+r" (tmp), "+m" (sem->count)
+ : : "memory");
+
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/segment.h linux-2.6.30.8/arch/x86/include/asm/segment.h
+--- linux-2.6.30.8/arch/x86/include/asm/segment.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/segment.h 2009-07-30 09:48:09.934667198 -0400
+@@ -88,7 +88,7 @@
+ #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
+ #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
+
+-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
++#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
+ #ifdef CONFIG_SMP
+ #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
+ #else
+@@ -102,6 +102,12 @@
+ #define __KERNEL_STACK_CANARY 0
+ #endif
+
++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
++
++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
++
+ #define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+ /*
+@@ -139,7 +145,7 @@
+ */
+
+ /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
+
+
+ #else
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/spinlock.h linux-2.6.30.8/arch/x86/include/asm/spinlock.h
+--- linux-2.6.30.8/arch/x86/include/asm/spinlock.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/spinlock.h 2009-07-30 09:48:09.934667198 -0400
+@@ -249,18 +249,50 @@ static inline int __raw_write_can_lock(r
+ static inline void __raw_read_lock(raw_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+- "jns 1f\n"
+- "call __read_lock_failed\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
+ "1:\n"
++ LOCK_PREFIX " addl $1,(%0)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "jns 2f\n"
++ "call __read_lock_failed\n\t"
++ "2:\n"
+ ::LOCK_PTR_REG (rw) : "memory");
+ }
+
+ static inline void __raw_write_lock(raw_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
+- "jz 1f\n"
+- "call __write_lock_failed\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
+ "1:\n"
++ LOCK_PREFIX " addl %1,(%0)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "jz 2f\n"
++ "call __write_lock_failed\n\t"
++ "2:\n"
+ ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
+ }
+
+@@ -286,12 +318,45 @@ static inline int __raw_write_trylock(ra
+
+ static inline void __raw_read_unlock(raw_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "decl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ :"+m" (rw->lock) : : "memory");
+ }
+
+ static inline void __raw_write_unlock(raw_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX "addl %1, %0"
++ asm volatile(LOCK_PREFIX "addl %1, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
+ }
+
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/system.h linux-2.6.30.8/arch/x86/include/asm/system.h
+--- linux-2.6.30.8/arch/x86/include/asm/system.h 2009-09-26 23:07:15.353790143 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/system.h 2009-09-26 23:07:26.432712429 -0400
+@@ -227,7 +227,7 @@ static inline unsigned long get_limit(un
+ {
+ unsigned long __limit;
+ asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+- return __limit + 1;
++ return __limit;
+ }
+
+ static inline void native_clts(void)
+@@ -353,6 +353,23 @@ static inline void native_wbinvd(void)
+
+ #define stts() write_cr0(read_cr0() | X86_CR0_TS)
+
++#define pax_open_kernel(cr0) \
++do { \
++ typecheck(unsigned long, cr0); \
++ preempt_disable(); \
++ barrier(); \
++ cr0 = read_cr0(); \
++ write_cr0(cr0 & ~X86_CR0_WP); \
++} while (0)
++
++#define pax_close_kernel(cr0) \
++do { \
++ typecheck(unsigned long, cr0); \
++ write_cr0(cr0); \
++ barrier(); \
++ preempt_enable_no_resched(); \
++} while (0)
++
+ #endif /* __KERNEL__ */
+
+ static inline void clflush(volatile void *__p)
+@@ -367,7 +384,7 @@ void enable_hlt(void);
+
+ void cpu_idle_wait(void);
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+ void default_idle(void);
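The pax_open_kernel()/pax_close_kernel() pair introduced above brackets writes to normally read-only kernel data by clearing and restoring CR0.WP with preemption disabled; later hunks (apm_32.c, acpi/sleep.c) use it around GDT updates. That mechanism exists only in ring 0, but the open-write-close shape can be sketched from user space with mprotect() standing in for the CR0.WP toggle (an analogy, not the kernel mechanism):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *page = mmap(NULL, pagesz, PROT_READ,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (page == MAP_FAILED)
		return 1;

	mprotect(page, pagesz, PROT_READ | PROT_WRITE);	/* "pax_open_kernel"   */
	strcpy(page, "patched");			/* the protected write */
	mprotect(page, pagesz, PROT_READ);		/* "pax_close_kernel"  */

	printf("%s\n", page);
	return 0;
}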
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/uaccess_32.h linux-2.6.30.8/arch/x86/include/asm/uaccess_32.h
+--- linux-2.6.30.8/arch/x86/include/asm/uaccess_32.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/uaccess_32.h 2009-07-30 09:48:09.936413079 -0400
+@@ -44,6 +44,9 @@ unsigned long __must_check __copy_from_u
+ static __always_inline unsigned long __must_check
+ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -62,6 +65,8 @@ __copy_to_user_inatomic(void __user *to,
+ return ret;
+ }
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_to_user_ll(to, from, n);
+ }
+
+@@ -89,6 +94,9 @@ __copy_to_user(void __user *to, const vo
+ static __always_inline unsigned long
+ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ /* Avoid zeroing the tail if the copy fails..
+ * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+ * but as the zeroing behaviour is only significant when n is not
+@@ -138,6 +146,10 @@ static __always_inline unsigned long
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -153,6 +165,8 @@ __copy_from_user(void *to, const void __
+ return ret;
+ }
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_from_user_ll(to, from, n);
+ }
+
+@@ -160,6 +174,10 @@ static __always_inline unsigned long __c
+ const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -182,14 +200,62 @@ static __always_inline unsigned long
+ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
+ {
+- return __copy_from_user_ll_nocache_nozero(to, from, n);
++ if ((long)n < 0)
++ return n;
++
++ return __copy_from_user_ll_nocache_nozero(to, from, n);
++}
++
++/**
++ * copy_to_user: - Copy a block of data into user space.
++ * @to: Destination address, in user space.
++ * @from: Source address, in kernel space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from kernel space to user space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ */
++static __always_inline unsigned long __must_check
++copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if (access_ok(VERIFY_WRITE, to, n))
++ n = __copy_to_user(to, from, n);
++ return n;
++}
++
++/**
++ * copy_from_user: - Copy a block of data from user space.
++ * @to: Destination address, in kernel space.
++ * @from: Source address, in user space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from user space to kernel space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ *
++ * If some data could not be copied, this function will pad the copied
++ * data to the requested size using zero bytes.
++ */
++static __always_inline unsigned long __must_check
++copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if ((long)n > 0) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ memset(to, 0, n);
++ }
++ return n;
+ }
+
+-unsigned long __must_check copy_to_user(void __user *to,
+- const void *from, unsigned long n);
+-unsigned long __must_check copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n);
+ long __must_check strncpy_from_user(char *dst, const char __user *src,
+ long count);
+ long __must_check __strncpy_from_user(char *dst,
+ const char __user *src, long count);
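Nearly every uaccess_32.h change above adds the same two guards: a length that is negative when viewed as a signed long is rejected outright (it can only come from an unchecked size computation), and the new inline copy_from_user() zero-fills the destination when access_ok() fails, so callers never consume stale stack contents. A self-contained sketch of both behaviours, with stand-ins for the real copy primitives:

#include <stdio.h>
#include <string.h>

/* Stand-ins for access_ok() and __copy_from_user_ll(): here every
 * address passes and the copy always succeeds. */
static int access_ok_stub(const void *p, unsigned long n)
{
	(void)p; (void)n;
	return 1;
}

static unsigned long copy_from_user_sketch(void *to, const void *from,
					    unsigned long n)
{
	if ((long)n < 0)		/* absurd size: refuse outright */
		return n;
	if (access_ok_stub(from, n)) {
		memcpy(to, from, n);	/* the kernel copies from userspace */
		return 0;		/* zero bytes left uncopied */
	}
	memset(to, 0, n);		/* failed check: zero-fill destination */
	return n;
}

int main(void)
{
	char src[8] = "user", dst[8];

	printf("left uncopied: %lu\n",
	       copy_from_user_sketch(dst, src, sizeof(src)));
	printf("left uncopied: %lu\n",
	       copy_from_user_sketch(dst, src, (unsigned long)-4));
	return 0;
}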
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/uaccess_64.h linux-2.6.30.8/arch/x86/include/asm/uaccess_64.h
+--- linux-2.6.30.8/arch/x86/include/asm/uaccess_64.h 2009-07-30 20:32:40.365617606 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/uaccess_64.h 2009-07-30 20:32:47.927601167 -0400
+@@ -10,6 +10,8 @@
+ #include <linux/lockdep.h>
+ #include <asm/page.h>
+
++#define set_fs(x) (current_thread_info()->addr_limit = (x))
++
+ /*
+ * Copy To/From Userspace
+ */
+@@ -19,20 +21,22 @@ __must_check unsigned long
+ copy_user_generic(void *to, const void *from, unsigned len);
+
+ __must_check unsigned long
+-copy_to_user(void __user *to, const void *from, unsigned len);
+-__must_check unsigned long
+-copy_from_user(void *to, const void __user *from, unsigned len);
+-__must_check unsigned long
+ copy_in_user(void __user *to, const void __user *from, unsigned len);
+
+ static __always_inline __must_check
+-int __copy_from_user(void *dst, const void __user *src, unsigned size)
++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
+ {
+- int ret = 0;
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
++
++ if ((int)size < 0)
++ return size;
++
++ if (!__builtin_constant_p(size)) {
++ check_object_size(dst, size, false);
+ return copy_user_generic(dst, (__force void *)src, size);
++ }
+ switch (size) {
+ case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+@@ -70,13 +74,19 @@ int __copy_from_user(void *dst, const vo
+ }
+
+ static __always_inline __must_check
+-int __copy_to_user(void __user *dst, const void *src, unsigned size)
++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
+ {
+- int ret = 0;
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
++
++ if ((int)size < 0)
++ return size;
++
++ if (!__builtin_constant_p(size)) {
++ check_object_size(src, size, true);
+ return copy_user_generic((__force void *)dst, src, size);
++ }
+ switch (size) {
+ case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+ ret, "b", "b", "iq", 1);
+@@ -114,11 +124,39 @@ int __copy_to_user(void __user *dst, con
+ }
+
+ static __always_inline __must_check
+-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
+ {
+- int ret = 0;
++ if (access_ok(VERIFY_WRITE, to, len))
++ len = __copy_to_user(to, from, len);
++ return len;
++}
++
++static __always_inline __must_check
++unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
++{
++ if ((int)len < 0)
++ return len;
++
++ if (access_ok(VERIFY_READ, from, len))
++ len = __copy_from_user(to, from, len);
++ else if ((int)len > 0) {
++ if (!__builtin_constant_p(len))
++ check_object_size(to, len, false);
++ memset(to, 0, len);
++ }
++ return len;
++}
++
++static __always_inline __must_check
++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++{
++ unsigned ret = 0;
+
+ might_fault();
++
++ if ((int)size < 0)
++ return size;
++
+ if (!__builtin_constant_p(size))
+ return copy_user_generic((__force void *)dst,
+ (__force void *)src, size);
+@@ -179,30 +217,38 @@ __must_check unsigned long __clear_user(
+ __must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
+ unsigned size);
+
+-static __must_check __always_inline int
++static __must_check __always_inline unsigned long
+ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+ {
++ if ((int)size < 0)
++ return size;
++
+ return copy_user_generic((__force void *)dst, src, size);
+ }
+
+-extern long __copy_user_nocache(void *dst, const void __user *src,
++extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
+ unsigned size, int zerorest);
+
+-static inline int
+-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
+ {
+ might_sleep();
++
++ if ((int)size < 0)
++ return size;
++
+ return __copy_user_nocache(dst, src, size, 1);
+ }
+
+-static inline int
+-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+ unsigned size)
+ {
++ if ((int)size < 0)
++ return size;
++
+ return __copy_user_nocache(dst, src, size, 0);
+ }
+
+-unsigned long
++extern unsigned long
+ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+
+ #endif /* _ASM_X86_UACCESS_64_H */
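Besides repeating the negative-length guard, the uaccess_64.h hunks above retype the copy helpers from int to unsigned long. These functions return a byte count, and funnelling that count through int on x86_64 (where long is 64-bit) both truncates large values and invites sign confusion. A small demonstration of the hazard the wider type avoids, on an LP64 target:

#include <stdio.h>

int main(void)
{
	unsigned long left = 0x100000001UL;	/* 4 GiB + 1 bytes "uncopied" */
	int as_int = (int)left;			/* typically truncates to 1   */

	printf("as unsigned long: %lu\n", left);
	printf("through int:      %d\n", as_int);
	return 0;
}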
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/uaccess.h linux-2.6.30.8/arch/x86/include/asm/uaccess.h
+--- linux-2.6.30.8/arch/x86/include/asm/uaccess.h 2009-07-30 20:32:40.364705510 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/uaccess.h 2009-08-09 07:48:47.926451868 -0400
+@@ -8,8 +8,10 @@
+ #include <linux/thread_info.h>
+ #include <linux/prefetch.h>
+ #include <linux/string.h>
++#include <linux/slab.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
++#include <asm/segment.h>
+
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+@@ -29,7 +31,12 @@
+
+ #define get_ds() (KERNEL_DS)
+ #define get_fs() (current_thread_info()->addr_limit)
++#ifdef CONFIG_X86_32
++void __set_fs(mm_segment_t x, int cpu);
++void set_fs(mm_segment_t x);
++#else
+ #define set_fs(x) (current_thread_info()->addr_limit = (x))
++#endif
+
+ #define segment_eq(a, b) ((a).seg == (b).seg)
+
+@@ -183,13 +190,21 @@ extern int __get_user_bad(void);
+ asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+
+-
++#ifdef CONFIG_X86_32
++#define _ASM_LOAD_USER_DS(ds) "movw %w" #ds ",%%ds\n"
++#define _ASM_LOAD_KERNEL_DS "pushl %%ss; popl %%ds\n"
++#else
++#define _ASM_LOAD_USER_DS(ds)
++#define _ASM_LOAD_KERNEL_DS
++#endif
+
+ #ifdef CONFIG_X86_32
+ #define __put_user_asm_u64(x, addr, err, errret) \
+- asm volatile("1: movl %%eax,0(%2)\n" \
+- "2: movl %%edx,4(%2)\n" \
++ asm volatile(_ASM_LOAD_USER_DS(5) \
++ "1: movl %%eax,%%ds:0(%2)\n" \
++ "2: movl %%edx,%%ds:4(%2)\n" \
+ "3:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+ " jmp 3b\n" \
+@@ -197,15 +212,18 @@ extern int __get_user_bad(void);
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=r" (err) \
+- : "A" (x), "r" (addr), "i" (errret), "0" (err))
++ : "A" (x), "r" (addr), "i" (errret), "0" (err), \
++ "r"(__USER_DS))
+
+ #define __put_user_asm_ex_u64(x, addr) \
+- asm volatile("1: movl %%eax,0(%1)\n" \
+- "2: movl %%edx,4(%1)\n" \
++ asm volatile(_ASM_LOAD_USER_DS(2) \
++ "1: movl %%eax,%%ds:0(%1)\n" \
++ "2: movl %%edx,%%ds:4(%1)\n" \
+ "3:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ _ASM_EXTABLE(2b, 3b - 2b) \
+- : : "A" (x), "r" (addr))
++ : : "A" (x), "r" (addr), "r"(__USER_DS))
+
+ #define __put_user_x8(x, ptr, __ret_pu) \
+ asm volatile("call __put_user_8" : "=a" (__ret_pu) \
+@@ -374,16 +392,18 @@ do { \
+ } while (0)
+
+ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
++ asm volatile(_ASM_LOAD_USER_DS(5) \
++ "1: mov"itype" %%ds:%2,%"rtype"1\n" \
+ "2:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " xor"itype" %"rtype"1,%"rtype"1\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (err), ltype(x) \
+- : "m" (__m(addr)), "i" (errret), "0" (err))
++ : "=r" (err), ltype (x) \
++ : "m" (__m(addr)), "i" (errret), "0" (err), "r"(__USER_DS))
+
+ #define __get_user_size_ex(x, ptr, size) \
+ do { \
+@@ -407,10 +427,12 @@ do { \
+ } while (0)
+
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
++ asm volatile(_ASM_LOAD_USER_DS(2) \
++ "1: mov"itype" %%ds:%1,%"rtype"0\n" \
+ "2:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+- : ltype(x) : "m" (__m(addr)))
++ : ltype(x) : "m" (__m(addr)), "r"(__USER_DS))
+
+ #define __put_user_nocheck(x, ptr, size) \
+ ({ \
+@@ -438,21 +460,26 @@ struct __large_struct { unsigned long bu
+ * aliasing issues.
+ */
+ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
++ asm volatile(_ASM_LOAD_USER_DS(5) \
++ "1: mov"itype" %"rtype"1,%%ds:%2\n" \
+ "2:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r"(err) \
+- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err),\
++ "r"(__USER_DS))
+
+ #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
++ asm volatile(_ASM_LOAD_USER_DS(2) \
++ "1: mov"itype" %"rtype"0,%%ds:%1\n" \
+ "2:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+- : : ltype(x), "m" (__m(addr)))
++ : : ltype(x), "m" (__m(addr)), "r"(__USER_DS))
+
+ /*
+ * uaccess_try and catch
+@@ -567,6 +594,7 @@ extern struct movsl_mask {
+
+ #define ARCH_HAS_NOCACHE_UACCESS 1
+
++#define ARCH_HAS_SORT_EXTABLE
+ #ifdef CONFIG_X86_32
+ # include "uaccess_32.h"
+ #else
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/vgtod.h linux-2.6.30.8/arch/x86/include/asm/vgtod.h
+--- linux-2.6.30.8/arch/x86/include/asm/vgtod.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/vgtod.h 2009-07-30 09:48:09.936413079 -0400
+@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
+ int sysctl_enabled;
+ struct timezone sys_tz;
+ struct { /* extract of a clocksource struct */
++ char name[8];
+ cycle_t (*vread)(void);
+ cycle_t cycle_last;
+ cycle_t mask;
+diff -urNp linux-2.6.30.8/arch/x86/include/asm/vsyscall.h linux-2.6.30.8/arch/x86/include/asm/vsyscall.h
+--- linux-2.6.30.8/arch/x86/include/asm/vsyscall.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/include/asm/vsyscall.h 2009-07-30 09:48:09.937413620 -0400
+@@ -15,9 +15,10 @@ enum vsyscall_num {
+
+ #ifdef __KERNEL__
+ #include <linux/seqlock.h>
++#include <linux/getcpu.h>
++#include <linux/time.h>
+
+ #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
+-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
+
+ /* Definitions for CONFIG_GENERIC_TIME definitions */
+ #define __section_vsyscall_gtod_data __attribute__ \
+@@ -31,7 +32,6 @@ enum vsyscall_num {
+ #define VGETCPU_LSL 2
+
+ extern int __vgetcpu_mode;
+-extern volatile unsigned long __jiffies;
+
+ /* kernel space (writeable) */
+ extern int vgetcpu_mode;
+@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
+
+ extern void map_vsyscall(void);
+
++extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
++extern time_t vtime(time_t *t);
++extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
+ #endif /* __KERNEL__ */
+
+ #endif /* _ASM_X86_VSYSCALL_H */
+diff -urNp linux-2.6.30.8/arch/x86/Kconfig linux-2.6.30.8/arch/x86/Kconfig
+--- linux-2.6.30.8/arch/x86/Kconfig 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/Kconfig 2009-08-04 17:52:34.387861424 -0400
+@@ -345,6 +345,7 @@ config X86_VSMP
+ select PARAVIRT
+ depends on X86_64 && PCI
+ depends on X86_EXTENDED_PLATFORM
++ depends on !PAX_KERNEXEC
+ ---help---
+ Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is
+ supposed to run on these EM64T-based machines. Only choose this option
+@@ -464,6 +465,7 @@ config VMI
+ bool "VMI Guest support"
+ select PARAVIRT
+ depends on X86_32
++ depends on !PAX_KERNEXEC
+ ---help---
+ VMI provides a paravirtualized interface to the VMware ESX server
+ (it could be used by other hypervisors in theory too, but is not
+@@ -474,6 +476,7 @@ config KVM_CLOCK
+ bool "KVM paravirtualized clock"
+ select PARAVIRT
+ select PARAVIRT_CLOCK
++ depends on !PAX_KERNEXEC
+ ---help---
+ Turning on this option will allow you to run a paravirtualized clock
+ when running over the KVM hypervisor. Instead of relying on a PIT
+@@ -484,6 +487,7 @@ config KVM_CLOCK
+ config KVM_GUEST
+ bool "KVM Guest support"
+ select PARAVIRT
++ depends on !PAX_KERNEXEC
+ ---help---
+ This option enables various optimizations for running under the KVM
+ hypervisor.
+@@ -492,6 +496,7 @@ source "arch/x86/lguest/Kconfig"
+
+ config PARAVIRT
+ bool "Enable paravirtualization code"
++ depends on !PAX_KERNEXEC
+ ---help---
+ This changes the kernel so it can modify itself when it is run
+ under a hypervisor, potentially improving performance significantly
+@@ -1058,7 +1063,7 @@ config PAGE_OFFSET
+ hex
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0x80000000 if VMSPLIT_2G
+- default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x70000000 if VMSPLIT_2G_OPT
+ default 0x40000000 if VMSPLIT_1G
+ default 0xC0000000
+ depends on X86_32
+@@ -1376,7 +1381,7 @@ config X86_PAT
+
+ config EFI
+ bool "EFI runtime service support"
+- depends on ACPI
++ depends on ACPI && !PAX_KERNEXEC
+ ---help---
+ This enables the kernel to use EFI runtime services that are
+ available (such as the EFI variable services).
+@@ -1466,9 +1471,7 @@ config KEXEC_JUMP
+
+ config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+- default "0x1000000" if X86_NUMAQ
+- default "0x200000" if X86_64
+- default "0x100000"
++ default "0x1000000"
+ ---help---
+ This gives the physical address where the kernel is loaded.
+
+@@ -1527,8 +1530,7 @@ config RELOCATABLE
+ config PHYSICAL_ALIGN
+ hex
+ prompt "Alignment value to which kernel should be aligned" if X86_32
+- default "0x100000" if X86_32
+- default "0x200000" if X86_64
++ default "0x200000"
+ range 0x2000 0x400000
+ ---help---
+ This value puts the alignment restrictions on physical address
+@@ -1560,9 +1562,10 @@ config HOTPLUG_CPU
+ Say N if you want to disable CPU hotplug.
+
+ config COMPAT_VDSO
+- def_bool y
++ def_bool n
+ prompt "Compat VDSO support"
+ depends on X86_32 || IA32_EMULATION
++ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
+ ---help---
+ Map the 32-bit VDSO to the predictable old-style address too.
+
+diff -urNp linux-2.6.30.8/arch/x86/Kconfig.cpu linux-2.6.30.8/arch/x86/Kconfig.cpu
+--- linux-2.6.30.8/arch/x86/Kconfig.cpu 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/Kconfig.cpu 2009-07-30 09:48:09.916592662 -0400
+@@ -331,7 +331,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
+
+ config X86_WP_WORKS_OK
+ def_bool y
+@@ -351,7 +351,7 @@ config X86_POPAD_OK
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+@@ -397,7 +397,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64)
++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+diff -urNp linux-2.6.30.8/arch/x86/Kconfig.debug linux-2.6.30.8/arch/x86/Kconfig.debug
+--- linux-2.6.30.8/arch/x86/Kconfig.debug 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/Kconfig.debug 2009-07-30 09:48:09.916592662 -0400
+@@ -99,7 +99,7 @@ config X86_PTDUMP
+ config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ default y
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && BROKEN
+ ---help---
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+diff -urNp linux-2.6.30.8/arch/x86/kernel/acpi/boot.c linux-2.6.30.8/arch/x86/kernel/acpi/boot.c
+--- linux-2.6.30.8/arch/x86/kernel/acpi/boot.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/acpi/boot.c 2009-07-30 09:48:09.938432163 -0400
+@@ -1737,7 +1737,7 @@ static struct dmi_system_id __initdata a
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
+ },
+ },
+- {}
++ { NULL, NULL, {{0, {0}}}, NULL}
+ };
+
+ /*
+diff -urNp linux-2.6.30.8/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.30.8/arch/x86/kernel/acpi/realmode/wakeup.S
+--- linux-2.6.30.8/arch/x86/kernel/acpi/realmode/wakeup.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/acpi/realmode/wakeup.S 2009-07-30 09:48:09.938432163 -0400
+@@ -104,7 +104,7 @@ _start:
+ movl %eax, %ecx
+ orl %edx, %ecx
+ jz 1f
+- movl $0xc0000080, %ecx
++ mov $MSR_EFER, %ecx
+ wrmsr
+ 1:
+
+diff -urNp linux-2.6.30.8/arch/x86/kernel/acpi/sleep.c linux-2.6.30.8/arch/x86/kernel/acpi/sleep.c
+--- linux-2.6.30.8/arch/x86/kernel/acpi/sleep.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/acpi/sleep.c 2009-07-30 09:48:09.938432163 -0400
+@@ -11,11 +11,12 @@
+ #include <linux/cpumask.h>
+ #include <asm/segment.h>
+ #include <asm/desc.h>
++#include <asm/e820.h>
+
+ #include "realmode/wakeup.h"
+ #include "sleep.h"
+
+-unsigned long acpi_wakeup_address;
++unsigned long acpi_wakeup_address = 0x2000;
+ unsigned long acpi_realmode_flags;
+
+ /* address in low memory of the wakeup routine. */
+@@ -37,6 +38,10 @@ int acpi_save_state_mem(void)
+ {
+ struct wakeup_header *header;
+
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_PAX_KERNEXEC)
++ unsigned long cr0;
++#endif
++
+ if (!acpi_realmode) {
+ printk(KERN_ERR "Could not allocate memory during boot, "
+ "S3 disabled\n");
+@@ -99,8 +104,18 @@ int acpi_save_state_mem(void)
+ header->trampoline_segment = setup_trampoline() >> 4;
+ #ifdef CONFIG_SMP
+ stack_start.sp = temp_stack + sizeof(temp_stack);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ early_gdt_descr.address =
+ (unsigned long)get_cpu_gdt_table(smp_processor_id());
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ initial_gs = per_cpu_offset(smp_processor_id());
+ #endif
+ initial_code = (unsigned long)wakeup_long64;
+@@ -134,14 +149,8 @@ void __init acpi_reserve_bootmem(void)
+ return;
+ }
+
+- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
+-
+- if (!acpi_realmode) {
+- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
+- return;
+- }
+-
+- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
++ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
++ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
+ }
+
+
+diff -urNp linux-2.6.30.8/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.30.8/arch/x86/kernel/acpi/wakeup_32.S
+--- linux-2.6.30.8/arch/x86/kernel/acpi/wakeup_32.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/acpi/wakeup_32.S 2009-07-30 09:48:09.938432163 -0400
+@@ -30,13 +30,11 @@ wakeup_pmode_return:
+ # and restore the stack ... but you need gdt for this to work
+ movl saved_context_esp, %esp
+
+- movl %cs:saved_magic, %eax
+- cmpl $0x12345678, %eax
++ cmpl $0x12345678, saved_magic
+ jne bogus_magic
+
+ # jump to place where we left off
+- movl saved_eip, %eax
+- jmp *%eax
++ jmp *(saved_eip)
+
+ bogus_magic:
+ jmp bogus_magic
+diff -urNp linux-2.6.30.8/arch/x86/kernel/alternative.c linux-2.6.30.8/arch/x86/kernel/alternative.c
+--- linux-2.6.30.8/arch/x86/kernel/alternative.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/alternative.c 2009-07-30 09:48:09.939725122 -0400
+@@ -400,7 +400,7 @@ void apply_paravirt(struct paravirt_patc
+
+ BUG_ON(p->len > MAX_PATCH_LEN);
+ /* prep the buffer with the original instructions */
+- memcpy(insnbuf, p->instr, p->len);
++ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
+ used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+ (unsigned long)p->instr, p->len);
+
+@@ -485,11 +485,26 @@ void __init alternative_instructions(voi
+ * instructions. And on the local CPU you need to be protected again NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+-void *text_poke_early(void *addr, const void *opcode, size_t len)
++void *__kprobes text_poke_early(void *addr, const void *opcode, size_t len)
+ {
+ unsigned long flags;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ local_irq_save(flags);
+- memcpy(addr, opcode, len);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
++ memcpy(ktla_ktva(addr), opcode, len);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ local_irq_restore(flags);
+ sync_core();
+ /* Could also do a CLFLUSH here to speed up CPU recovery; but
+@@ -512,35 +527,27 @@ void *text_poke_early(void *addr, const
+ */
+ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
+ {
+- unsigned long flags;
+- char *vaddr;
++ unsigned char *vaddr = ktla_ktva(addr);
+ struct page *pages[2];
+- int i;
++ size_t i;
++
++ if (!core_kernel_text((unsigned long)addr)
+
+- if (!core_kernel_text((unsigned long)addr)) {
+- pages[0] = vmalloc_to_page(addr);
+- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
++#if defined(CONFIG_X86_32) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ && (vaddr < MODULES_VADDR || MODULES_END < vaddr)
++#endif
++
++ ) {
++ pages[0] = vmalloc_to_page(vaddr);
++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
+ } else {
+- pages[0] = virt_to_page(addr);
++ pages[0] = virt_to_page(vaddr);
+ WARN_ON(!PageReserved(pages[0]));
+- pages[1] = virt_to_page(addr + PAGE_SIZE);
++ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
+ }
+ BUG_ON(!pages[0]);
+- local_irq_save(flags);
+- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+- if (pages[1])
+- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
+- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+- clear_fixmap(FIX_TEXT_POKE0);
+- if (pages[1])
+- clear_fixmap(FIX_TEXT_POKE1);
+- local_flush_tlb();
+- sync_core();
+- /* Could also do a CLFLUSH here to speed up CPU recovery; but
+- that causes hangs on some VIA CPUs. */
++ text_poke_early(addr, opcode, len);
+ for (i = 0; i < len; i++)
+- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
+- local_irq_restore(flags);
++ BUG_ON(((char *)vaddr)[i] != ((char *)opcode)[i]);
+ return addr;
+ }
+diff -urNp linux-2.6.30.8/arch/x86/kernel/apm_32.c linux-2.6.30.8/arch/x86/kernel/apm_32.c
+--- linux-2.6.30.8/arch/x86/kernel/apm_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/apm_32.c 2009-07-30 09:48:09.939725122 -0400
+@@ -403,7 +403,7 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitq
+ static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
+ static struct apm_user *user_list;
+ static DEFINE_SPINLOCK(user_list_lock);
+-static const struct desc_struct bad_bios_desc = { { { 0, 0x00409200 } } };
++static const struct desc_struct bad_bios_desc = { { { 0, 0x00409300 } } };
+
+ static const char driver_version[] = "1.16ac"; /* no spaces */
+
+@@ -576,12 +576,25 @@ static long __apm_bios_call(void *_call)
+ struct desc_struct *gdt;
+ struct apm_bios_call *call = _call;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ cpu = get_cpu();
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ gdt[0x40 / 8] = bad_bios_desc;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+ apm_bios_call_asm(call->func, call->ebx, call->ecx,
+@@ -589,7 +602,17 @@ static long __apm_bios_call(void *_call)
+ &call->esi);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ gdt[0x40 / 8] = save_desc_40;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ put_cpu();
+
+ return call->eax & 0xff;
+@@ -652,19 +675,42 @@ static long __apm_bios_call_simple(void
+ struct desc_struct *gdt;
+ struct apm_bios_call *call = _call;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ cpu = get_cpu();
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ gdt[0x40 / 8] = bad_bios_desc;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+ error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
+ &call->eax);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ gdt[0x40 / 8] = save_desc_40;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ put_cpu();
+ return error;
+ }
+@@ -967,7 +1013,7 @@ recalc:
+
+ static void apm_power_off(void)
+ {
+- unsigned char po_bios_call[] = {
++ const unsigned char po_bios_call[] = {
+ 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
+ 0x8e, 0xd0, /* movw ax,ss */
+ 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
+@@ -1925,7 +1971,10 @@ static const struct file_operations apm_
+ static struct miscdevice apm_device = {
+ APM_MINOR_DEV,
+ "apm_bios",
+- &apm_bios_fops
++ &apm_bios_fops,
++ {NULL, NULL},
++ NULL,
++ NULL
+ };
+
+
+@@ -2246,7 +2295,7 @@ static struct dmi_system_id __initdata a
+ { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
+ },
+
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}
+ };
+
+ /*
+@@ -2264,6 +2313,10 @@ static int __init apm_init(void)
+ struct desc_struct *gdt;
+ int err;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ dmi_check_system(apm_dmi_table);
+
+ if (apm_info.bios.version == 0 || paravirt_enabled() || machine_is_olpc()) {
+@@ -2337,9 +2390,18 @@ static int __init apm_init(void)
+ * This is for buggy BIOS's that refer to (real mode) segment 0x40
+ * even though they are called in protected mode.
+ */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
+ _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ /*
+ * Set up the long jump entry point to the APM BIOS, which is called
+ * from inline assembly.
+@@ -2358,6 +2420,11 @@ static int __init apm_init(void)
+ * code to that CPU.
+ */
+ gdt = get_cpu_gdt_table(0);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ set_base(gdt[APM_CS >> 3],
+ __va((unsigned long)apm_info.bios.cseg << 4));
+ set_base(gdt[APM_CS_16 >> 3],
+@@ -2365,6 +2432,10 @@ static int __init apm_init(void)
+ set_base(gdt[APM_DS >> 3],
+ __va((unsigned long)apm_info.bios.dseg << 4));
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ proc_create("apm", 0, NULL, &apm_file_ops);
+
+ kapmd_task = kthread_create(apm, NULL, "kapmd");
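A recurring cosmetic change in this patch, visible in the miscdevice and DMI tables above, is spelling out sentinel entries: an empty "{ }" terminator becomes an explicit all-NULL/all-zero initializer. Both forms zero-initialize the terminator; the explicit form only documents its shape. For illustration (GNU C, as the kernel is built with gcc):

#include <stddef.h>

struct entry {
	const char *name;
	int flags;
};

static const struct entry table_implicit[] = {
	{ "first", 1 },
	{ }			/* implicit all-zero terminator (GNU C) */
};

static const struct entry table_explicit[] = {
	{ "first", 1 },
	{ NULL, 0 }		/* the same terminator, spelled out     */
};

int main(void)
{
	/* Both sentinels compare equal field by field. */
	return (table_implicit[1].name == table_explicit[1].name &&
		table_implicit[1].flags == table_explicit[1].flags) ? 0 : 1;
}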
+diff -urNp linux-2.6.30.8/arch/x86/kernel/asm-offsets_32.c linux-2.6.30.8/arch/x86/kernel/asm-offsets_32.c
+--- linux-2.6.30.8/arch/x86/kernel/asm-offsets_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/asm-offsets_32.c 2009-07-30 09:48:09.939725122 -0400
+@@ -115,6 +115,7 @@ void foo(void)
+ OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
+ OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
+ #endif
+
+ #ifdef CONFIG_XEN
+diff -urNp linux-2.6.30.8/arch/x86/kernel/asm-offsets_64.c linux-2.6.30.8/arch/x86/kernel/asm-offsets_64.c
+--- linux-2.6.30.8/arch/x86/kernel/asm-offsets_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/asm-offsets_64.c 2009-07-30 09:48:09.939725122 -0400
+@@ -114,6 +114,7 @@ int main(void)
+ ENTRY(cr8);
+ BLANK();
+ #undef ENTRY
++ DEFINE(TSS_size, sizeof(struct tss_struct));
+ DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
+ BLANK();
+ DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
+diff -urNp linux-2.6.30.8/arch/x86/kernel/cpu/common.c linux-2.6.30.8/arch/x86/kernel/cpu/common.c
+--- linux-2.6.30.8/arch/x86/kernel/cpu/common.c 2009-09-26 23:07:15.388944836 -0400
++++ linux-2.6.30.8/arch/x86/kernel/cpu/common.c 2009-09-26 23:07:26.494755897 -0400
+@@ -60,60 +60,6 @@ void __init setup_cpu_local_masks(void)
+
+ static const struct cpu_dev *this_cpu __cpuinitdata;
+
+-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
+-#ifdef CONFIG_X86_64
+- /*
+- * We need valid kernel segments for data and code in long mode too
+- * IRET will check the segment types kkeil 2000/10/28
+- * Also sysret mandates a special GDT layout
+- *
+- * TLS descriptors are currently at a different place compared to i386.
+- * Hopefully nobody expects them at a fixed place (Wine?)
+- */
+- [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
+- [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
+- [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
+- [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
+- [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
+- [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
+-#else
+- [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
+- [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
+- [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
+- [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
+- /*
+- * Segments used for calling PnP BIOS have byte granularity.
+- * They code segments and data segments have fixed 64k limits,
+- * the transfer segment sizes are set at run time.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
+- /* 16-bit code */
+- [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
+- /*
+- * The APM segments have byte granularity and their bases
+- * are set at run time. All have 64k limits.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
+- /* 16-bit code */
+- [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
+- /* data */
+- [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
+-
+- [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
+- [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+- GDT_STACK_CANARY_INIT
+-#endif
+-} };
+-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+-
+ static int __init x86_xsave_setup(char *s)
+ {
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+@@ -320,7 +266,7 @@ void switch_to_new_gdt(int cpu)
+ {
+ struct desc_ptr gdt_descr;
+
+- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ /* Reload the per-cpu base */
+@@ -796,6 +742,10 @@ static void __cpuinit identify_cpu(struc
+ /* Filter out anything that depends on CPUID levels we don't have */
+ filter_cpuid_features(c, true);
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ setup_clear_cpu_cap(X86_FEATURE_SEP);
++#endif
++
+ /* If the model name is still unset, do table lookup. */
+ if (!c->x86_model_id[0]) {
+ const char *p;
+@@ -972,7 +922,7 @@ static __init int setup_disablecpuid(cha
+ __setup("clearcpuid=", setup_disablecpuid);
+
+ #ifdef CONFIG_X86_64
+-struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
++struct desc_ptr idt_descr __read_only = { 256 * 16 - 1, (unsigned long) idt_table };
+
+ DEFINE_PER_CPU_FIRST(union irq_stack_union,
+ irq_stack_union) __aligned(PAGE_SIZE);
+@@ -1082,7 +1032,7 @@ void __cpuinit cpu_init(void)
+ int i;
+
+ cpu = stack_smp_processor_id();
+- t = &per_cpu(init_tss, cpu);
++ t = init_tss + cpu;
+ orig_ist = &per_cpu(orig_ist, cpu);
+
+ #ifdef CONFIG_NUMA
+@@ -1180,7 +1130,7 @@ void __cpuinit cpu_init(void)
+ {
+ int cpu = smp_processor_id();
+ struct task_struct *curr = current;
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+ struct thread_struct *thread = &curr->thread;
+
+ if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
+diff -urNp linux-2.6.30.8/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c linux-2.6.30.8/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+--- linux-2.6.30.8/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2009-07-30 09:48:09.941068037 -0400
+@@ -590,7 +590,7 @@ static const struct dmi_system_id sw_any
+ DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+ #endif
+
+diff -urNp linux-2.6.30.8/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c linux-2.6.30.8/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+--- linux-2.6.30.8/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2009-07-30 09:48:09.941727851 -0400
+@@ -225,7 +225,7 @@ static struct cpu_model models[] =
+ { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
+ { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
+
+- { NULL, }
++ { NULL, NULL, 0, NULL}
+ };
+ #undef _BANIAS
+ #undef BANIAS
+diff -urNp linux-2.6.30.8/arch/x86/kernel/cpu/intel.c linux-2.6.30.8/arch/x86/kernel/cpu/intel.c
+--- linux-2.6.30.8/arch/x86/kernel/cpu/intel.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/cpu/intel.c 2009-07-30 09:48:09.941727851 -0400
+@@ -117,7 +117,7 @@ static void __cpuinit trap_init_f00f_bug
+ * Update the IDT descriptor and reload the IDT so that
+ * it uses the read-only mapped virtual address.
+ */
+- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
+ load_idt(&idt_descr);
+ }
+ #endif
+diff -urNp linux-2.6.30.8/arch/x86/kernel/cpu/mcheck/mce_64.c linux-2.6.30.8/arch/x86/kernel/cpu/mcheck/mce_64.c
+--- linux-2.6.30.8/arch/x86/kernel/cpu/mcheck/mce_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/cpu/mcheck/mce_64.c 2009-07-30 09:48:09.941727851 -0400
+@@ -830,6 +830,7 @@ static struct miscdevice mce_log_device
+ MISC_MCELOG_MINOR,
+ "mcelog",
+ &mce_chrdev_ops,
++ {NULL, NULL}, NULL, NULL
+ };
+
+ /*
+diff -urNp linux-2.6.30.8/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.30.8/arch/x86/kernel/cpu/mtrr/generic.c
+--- linux-2.6.30.8/arch/x86/kernel/cpu/mtrr/generic.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/cpu/mtrr/generic.c 2009-07-30 09:48:09.942991706 -0400
+@@ -23,14 +23,14 @@ static struct fixed_range_block fixed_ra
+ { MTRRfix64K_00000_MSR, 1 }, /* one 64k MTRR */
+ { MTRRfix16K_80000_MSR, 2 }, /* two 16k MTRRs */
+ { MTRRfix4K_C0000_MSR, 8 }, /* eight 4k MTRRs */
+- {}
++ { 0, 0 }
+ };
+
+ static unsigned long smp_changes_mask;
+ static int mtrr_state_set;
+ u64 mtrr_tom2;
+
+-struct mtrr_state_type mtrr_state = {};
++struct mtrr_state_type mtrr_state;
+ EXPORT_SYMBOL_GPL(mtrr_state);
+
+ /**
+diff -urNp linux-2.6.30.8/arch/x86/kernel/crash.c linux-2.6.30.8/arch/x86/kernel/crash.c
+--- linux-2.6.30.8/arch/x86/kernel/crash.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/crash.c 2009-07-30 09:48:09.942991706 -0400
+@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
+ regs = args->regs;
+
+ #ifdef CONFIG_X86_32
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ crash_fixup_ss_esp(&fixed_regs, regs);
+ regs = &fixed_regs;
+ }
+diff -urNp linux-2.6.30.8/arch/x86/kernel/doublefault_32.c linux-2.6.30.8/arch/x86/kernel/doublefault_32.c
+--- linux-2.6.30.8/arch/x86/kernel/doublefault_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/doublefault_32.c 2009-07-30 09:48:09.942991706 -0400
+@@ -11,7 +11,7 @@
+
+ #define DOUBLEFAULT_STACKSIZE (1024)
+ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
+-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
+
+ #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
+
+@@ -21,7 +21,7 @@ static void doublefault_fn(void)
+ unsigned long gdt, tss;
+
+ store_gdt(&gdt_desc);
+- gdt = gdt_desc.address;
++ gdt = (unsigned long)gdt_desc.address;
+
+ printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+
+@@ -60,10 +60,10 @@ struct tss_struct doublefault_tss __cach
+ /* 0x2 bit is always set */
+ .flags = X86_EFLAGS_SF | 0x2,
+ .sp = STACK_START,
+- .es = __USER_DS,
++ .es = __KERNEL_DS,
+ .cs = __KERNEL_CS,
+ .ss = __KERNEL_DS,
+- .ds = __USER_DS,
++ .ds = __KERNEL_DS,
+ .fs = __KERNEL_PERCPU,
+
+ .__cr3 = __pa_nodebug(swapper_pg_dir),
+diff -urNp linux-2.6.30.8/arch/x86/kernel/dumpstack_32.c linux-2.6.30.8/arch/x86/kernel/dumpstack_32.c
+--- linux-2.6.30.8/arch/x86/kernel/dumpstack_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/dumpstack_32.c 2009-07-30 09:48:09.943696998 -0400
+@@ -107,11 +107,12 @@ void show_registers(struct pt_regs *regs
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned int code_prologue = code_bytes * 43 / 64;
+ unsigned int code_len = code_bytes;
+ unsigned char c;
+ u8 *ip;
++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
+
+ printk(KERN_EMERG "Stack:\n");
+ show_stack_log_lvl(NULL, regs, &regs->sp,
+@@ -119,10 +120,10 @@ void show_registers(struct pt_regs *regs
+
+ printk(KERN_EMERG "Code: ");
+
+- ip = (u8 *)regs->ip - code_prologue;
++ ip = (u8 *)regs->ip - code_prologue + cs_base;
+ if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+ /* try starting at IP */
+- ip = (u8 *)regs->ip;
++ ip = (u8 *)regs->ip + cs_base;
+ code_len = code_len - code_prologue + 1;
+ }
+ for (i = 0; i < code_len; i++, ip++) {
+@@ -131,7 +132,7 @@ void show_registers(struct pt_regs *regs
+ printk(" Bad EIP value.");
+ break;
+ }
+- if (ip == (u8 *)regs->ip)
++ if (ip == (u8 *)regs->ip + cs_base)
+ printk("<%02x> ", c);
+ else
+ printk("%02x ", c);
+@@ -144,6 +145,7 @@ int is_valid_bugaddr(unsigned long ip)
+ {
+ unsigned short ud2;
+
++ ip = ktla_ktva(ip);
+ if (ip < PAGE_OFFSET)
+ return 0;
+ if (probe_kernel_address((unsigned short *)ip, ud2))
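Both dumpstack_32.c hunks route the instruction pointer through ktla_ktva() before dereferencing it: under KERNEXEC the kernel's text executes at a different virtual address than the mapping it may be read through, so a code address must be rebased first. A hypothetical model of that helper, assuming a constant rebase offset (the real value is a link-time constant not shown in this hunk):

#include <stdio.h>

/* KERNEXEC_TEXT_OFFSET is an assumed placeholder, purely illustrative. */
#define KERNEXEC_TEXT_OFFSET 0x01000000UL

static unsigned long ktla_ktva_model(unsigned long addr)
{
	return addr + KERNEXEC_TEXT_OFFSET;
}

int main(void)
{
	unsigned long ip = 0xc0100000UL;

	printf("executing at %#lx, readable at %#lx\n",
	       ip, ktla_ktva_model(ip));
	return 0;
}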
+diff -urNp linux-2.6.30.8/arch/x86/kernel/dumpstack.c linux-2.6.30.8/arch/x86/kernel/dumpstack.c
+--- linux-2.6.30.8/arch/x86/kernel/dumpstack.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/dumpstack.c 2009-07-30 09:48:09.942991706 -0400
+@@ -180,7 +180,7 @@ void dump_stack(void)
+ #endif
+
+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -241,7 +241,7 @@ void __kprobes oops_end(unsigned long fl
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+- do_exit(signr);
++ do_group_exit(signr);
+ }
+
+ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+@@ -295,7 +295,7 @@ void die(const char *str, struct pt_regs
+ unsigned long flags = oops_begin();
+ int sig = SIGSEGV;
+
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ report_bug(regs->ip, regs);
+
+ if (__die(str, regs, err))
+diff -urNp linux-2.6.30.8/arch/x86/kernel/e820.c linux-2.6.30.8/arch/x86/kernel/e820.c
+--- linux-2.6.30.8/arch/x86/kernel/e820.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/e820.c 2009-07-30 09:48:09.943696998 -0400
+@@ -739,7 +739,10 @@ struct early_res {
+ };
+ static struct early_res early_res[MAX_EARLY_RES] __initdata = {
+ { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
+- {}
++#ifdef CONFIG_VM86
++ { PAGE_SIZE, ISA_START_ADDRESS, "V86 mode memory", 1 },
++#endif
++ { 0, 0, {0}, 0 }
+ };
+
+ static int __init find_overlapped_early(u64 start, u64 end)
+diff -urNp linux-2.6.30.8/arch/x86/kernel/efi_32.c linux-2.6.30.8/arch/x86/kernel/efi_32.c
+--- linux-2.6.30.8/arch/x86/kernel/efi_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/efi_32.c 2009-07-30 09:48:09.943696998 -0400
+@@ -38,70 +38,38 @@
+ */
+
+ static unsigned long efi_rt_eflags;
+-static pgd_t efi_bak_pg_dir_pointer[2];
++static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
+
+-void efi_call_phys_prelog(void)
++void __init efi_call_phys_prelog(void)
+ {
+- unsigned long cr4;
+- unsigned long temp;
+ struct desc_ptr gdt_descr;
+
+ local_irq_save(efi_rt_eflags);
+
+- /*
+- * If I don't have PAE, I should just duplicate two entries in page
+- * directory. If I have PAE, I just need to duplicate one entry in
+- * page directory.
+- */
+- cr4 = read_cr4_safe();
+
+- if (cr4 & X86_CR4_PAE) {
+- efi_bak_pg_dir_pointer[0].pgd =
+- swapper_pg_dir[pgd_index(0)].pgd;
+- swapper_pg_dir[0].pgd =
+- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
+- } else {
+- efi_bak_pg_dir_pointer[0].pgd =
+- swapper_pg_dir[pgd_index(0)].pgd;
+- efi_bak_pg_dir_pointer[1].pgd =
+- swapper_pg_dir[pgd_index(0x400000)].pgd;
+- swapper_pg_dir[pgd_index(0)].pgd =
+- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
+- temp = PAGE_OFFSET + 0x400000;
+- swapper_pg_dir[pgd_index(0x400000)].pgd =
+- swapper_pg_dir[pgd_index(temp)].pgd;
+- }
++ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
++ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+
+ /*
+ * After the lock is released, the original page table is restored.
+ */
+ __flush_tlb_all();
+
+- gdt_descr.address = __pa(get_cpu_gdt_table(0));
++ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ }
+
+-void efi_call_phys_epilog(void)
++void __init efi_call_phys_epilog(void)
+ {
+- unsigned long cr4;
+ struct desc_ptr gdt_descr;
+
+- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
++ gdt_descr.address = get_cpu_gdt_table(0);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+
+- cr4 = read_cr4_safe();
+-
+- if (cr4 & X86_CR4_PAE) {
+- swapper_pg_dir[pgd_index(0)].pgd =
+- efi_bak_pg_dir_pointer[0].pgd;
+- } else {
+- swapper_pg_dir[pgd_index(0)].pgd =
+- efi_bak_pg_dir_pointer[0].pgd;
+- swapper_pg_dir[pgd_index(0x400000)].pgd =
+- efi_bak_pg_dir_pointer[1].pgd;
+- }
++ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
+
+ /*
+ * After the lock is released, the original page table is restored.
+diff -urNp linux-2.6.30.8/arch/x86/kernel/efi_stub_32.S linux-2.6.30.8/arch/x86/kernel/efi_stub_32.S
+--- linux-2.6.30.8/arch/x86/kernel/efi_stub_32.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/efi_stub_32.S 2009-07-30 09:48:09.944948217 -0400
+@@ -6,6 +6,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/init.h>
+ #include <asm/page_types.h>
+
+ /*
+@@ -20,7 +21,7 @@
+ * service functions will comply with gcc calling convention, too.
+ */
+
+-.text
++__INIT
+ ENTRY(efi_call_phys)
+ /*
+ * 0. The function can only be called in Linux kernel. So CS has been
+@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
+ * The mapping of lower virtual memory has been created in prelog and
+ * epilog.
+ */
+- movl $1f, %edx
+- subl $__PAGE_OFFSET, %edx
+- jmp *%edx
++ jmp 1f-__PAGE_OFFSET
+ 1:
+
+ /*
+@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
+ * parameter 2, ..., param n. To make things easy, we save the return
+ * address of efi_call_phys in a global variable.
+ */
+- popl %edx
+- movl %edx, saved_return_addr
+- /* get the function pointer into ECX*/
+- popl %ecx
+- movl %ecx, efi_rt_function_ptr
+- movl $2f, %edx
+- subl $__PAGE_OFFSET, %edx
+- pushl %edx
++ popl (saved_return_addr)
++ popl (efi_rt_function_ptr)
+
+ /*
+ * 3. Clear PG bit in %CR0.
+@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
+ /*
+ * 5. Call the physical function.
+ */
+- jmp *%ecx
++ call *(efi_rt_function_ptr-__PAGE_OFFSET)
+
+-2:
+ /*
+ * 6. After EFI runtime service returns, control will return to
+ * following instruction. We'd better readjust stack pointer first.
+@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
+ movl %cr0, %edx
+ orl $0x80000000, %edx
+ movl %edx, %cr0
+- jmp 1f
+-1:
++
+ /*
+ * 8. Now restore the virtual mode from flat mode by
+ * adding EIP with PAGE_OFFSET.
+ */
+- movl $1f, %edx
+- jmp *%edx
++ jmp 1f+__PAGE_OFFSET
+ 1:
+
+ /*
+ * 9. Balance the stack. And because EAX contain the return value,
+ * we'd better not clobber it.
+ */
+- leal efi_rt_function_ptr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
++ pushl (efi_rt_function_ptr)
+
+ /*
+- * 10. Push the saved return address onto the stack and return.
++ * 10. Return to the saved return address.
+ */
+- leal saved_return_addr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
+- ret
++ jmpl *(saved_return_addr)
+ ENDPROC(efi_call_phys)
+ .previous
+
+-.data
++__INITDATA
+ saved_return_addr:
+ .long 0
+ efi_rt_function_ptr:
+diff -urNp linux-2.6.30.8/arch/x86/kernel/entry_32.S linux-2.6.30.8/arch/x86/kernel/entry_32.S
+--- linux-2.6.30.8/arch/x86/kernel/entry_32.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/entry_32.S 2009-08-12 21:15:21.098460043 -0400
+@@ -192,7 +192,7 @@
+
+ #endif /* CONFIG_X86_32_LAZY_GS */
+
+-.macro SAVE_ALL
++.macro __SAVE_ALL _DS
+ cld
+ PUSH_GS
+ pushl %fs
+@@ -225,7 +225,7 @@
+ pushl %ebx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ebx, 0
+- movl $(__USER_DS), %edx
++ movl $\_DS, %edx
+ movl %edx, %ds
+ movl %edx, %es
+ movl $(__KERNEL_PERCPU), %edx
+@@ -233,6 +233,21 @@
+ SET_KERNEL_GS %edx
+ .endm
+
++.macro SAVE_ALL
++#ifdef CONFIG_PAX_KERNEXEC
++ __SAVE_ALL __KERNEL_DS
++ GET_CR0_INTO_EDX;
++ movl %edx, %esi;
++ orl $X86_CR0_WP, %edx;
++ xorl %edx, %esi;
++ SET_CR0_FROM_EDX
++#elif defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ __SAVE_ALL __KERNEL_DS
++#else
++ __SAVE_ALL __USER_DS
++#endif
++.endm
++
+ .macro RESTORE_INT_REGS
+ popl %ebx
+ CFI_ADJUST_CFA_OFFSET -4
+@@ -330,6 +345,11 @@ ENTRY(ret_from_fork)
+ CFI_ADJUST_CFA_OFFSET 4
+ popfl
+ CFI_ADJUST_CFA_OFFSET -4
++
++#ifdef CONFIG_PAX_KERNEXEC
++ xorl %esi, %esi
++#endif
++
+ jmp syscall_exit
+ CFI_ENDPROC
+ END(ret_from_fork)
+@@ -353,7 +373,17 @@ check_userspace:
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
+ cmpl $USER_RPL, %eax
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jae resume_userspace
++
++ GET_CR0_INTO_EDX
++ xorl %esi, %edx
++ SET_CR0_FROM_EDX
++ jmp resume_kernel
++#else
+ jb resume_kernel # not returning to v8086 or userspace
++#endif
+
+ ENTRY(resume_userspace)
+ LOCKDEP_SYS_EXIT
+@@ -415,10 +445,9 @@ sysenter_past_esp:
+ /*CFI_REL_OFFSET cs, 0*/
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+- * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ */
+- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++ GET_THREAD_INFO(%ebp)
++ pushl TI_sysenter_return(%ebp)
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET eip, 0
+
+@@ -431,9 +460,19 @@ sysenter_past_esp:
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
++ movl PT_OLDESP(%esp),%ebp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov PT_OLDSS(%esp),%ds
++1: movl %ds:(%ebp),%ebp
++ push %ss
++ pop %ds
++#else
+ cmpl $__PAGE_OFFSET-3,%ebp
+ jae syscall_fault
+ 1: movl (%ebp),%ebp
++#endif
++
+ movl %ebp,PT_EBP(%esp)
+ .section __ex_table,"a"
+ .align 4
+@@ -456,12 +495,23 @@ sysenter_do_call:
+ testl $_TIF_ALLWORK_MASK, %ecx
+ jne sysexit_audit
+ sysenter_exit:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call pax_randomize_kstack
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++#endif
++
+ /* if something modifies registers it must also disable sysexit */
+ movl PT_EIP(%esp), %edx
+ movl PT_OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+ TRACE_IRQS_ON
+ 1: mov PT_FS(%esp), %fs
++2: mov PT_DS(%esp), %ds
++3: mov PT_ES(%esp), %es
+ PTGS_TO_GS
+ ENABLE_INTERRUPTS_SYSEXIT
+
+@@ -505,11 +555,17 @@ sysexit_audit:
+
+ CFI_ENDPROC
+ .pushsection .fixup,"ax"
+-2: movl $0,PT_FS(%esp)
++4: movl $0,PT_FS(%esp)
++ jmp 1b
++5: movl $0,PT_DS(%esp)
++ jmp 1b
++6: movl $0,PT_ES(%esp)
+ jmp 1b
+ .section __ex_table,"a"
+ .align 4
+- .long 1b,2b
++ .long 1b,4b
++ .long 2b,5b
++ .long 3b,6b
+ .popsection
+ PTGS_TO_GS_EX
+ ENDPROC(ia32_sysenter_target)
+@@ -539,6 +595,10 @@ syscall_exit:
+ testl $_TIF_ALLWORK_MASK, %ecx # current->work
+ jne syscall_exit_work
+
++#ifdef CONFIG_PAX_RANDKSTACK
++ call pax_randomize_kstack
++#endif
++
+ restore_all:
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
+ # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+@@ -631,25 +691,19 @@ work_resched:
+
+ work_notifysig: # deal with pending signals and
+ # notify-resume requests
++ movl %esp, %eax
+ #ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
+- movl %esp, %eax
+- jne work_notifysig_v86 # returning to kernel-space or
++ jz 1f # returning to kernel-space or
+ # vm86-space
+- xorl %edx, %edx
+- call do_notify_resume
+- jmp resume_userspace_sig
+
+- ALIGN
+-work_notifysig_v86:
+ pushl %ecx # save ti_flags for do_notify_resume
+ CFI_ADJUST_CFA_OFFSET 4
+ call save_v86_state # %eax contains pt_regs pointer
+ popl %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ movl %eax, %esp
+-#else
+- movl %esp, %eax
++1:
+ #endif
+ xorl %edx, %edx
+ call do_notify_resume
+@@ -684,6 +738,10 @@ END(syscall_exit_work)
+
+ RING0_INT_FRAME # can't unwind into user space anyway
+ syscall_fault:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ push %ss
++ pop %ds
++#endif
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT,PT_EAX(%esp)
+ jmp resume_userspace
+@@ -717,7 +775,13 @@ PTREGSCALL(vm86old)
+
+ .macro FIXUP_ESPFIX_STACK
+ 	/* since we are on the wrong stack, we can't write this in C :( */
+- PER_CPU(gdt_page, %ebx)
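++	/* each CPU's GDT is one page of cpu_gdt_table: base = table + cpu * PAGE_SIZE */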
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
+ addl %esp, %eax
+ pushl $__KERNEL_DS
+@@ -1176,7 +1240,6 @@ return_to_handler:
+ ret
+ #endif
+
+-.section .rodata,"a"
+ #include "syscall_table_32.S"
+
+ syscall_table_size=(.-sys_call_table)
+@@ -1228,12 +1291,21 @@ error_code:
+ movl %ecx, %fs
+ UNWIND_ESPFIX_STACK
+ GS_TO_REG %ecx
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_EDX
++ movl %edx, %esi
++ orl $X86_CR0_WP, %edx
++ xorl %edx, %esi
++ SET_CR0_FROM_EDX
++#endif
++
+ movl PT_GS(%esp), %edi # get the function address
+ movl PT_ORIG_EAX(%esp), %edx # get the error code
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+ REG_TO_PTGS %ecx
+ SET_KERNEL_GS %ecx
+- movl $(__USER_DS), %ecx
++ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
+ TRACE_IRQS_OFF
+@@ -1329,6 +1401,13 @@ nmi_stack_correct:
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_nmi
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_EDX
++ xorl %esi, %edx
++ SET_CR0_FROM_EDX
++#endif
++
+ jmp restore_nocheck_notrace
+ CFI_ENDPROC
+
+@@ -1369,6 +1448,13 @@ nmi_espfix_stack:
+ FIXUP_ESPFIX_STACK # %eax == %esp
+ xorl %edx,%edx # zero error code
+ call do_nmi
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_EDX
++ xorl %esi, %edx
++ SET_CR0_FROM_EDX
++#endif
++
+ RESTORE_REGS
+ lss 12+4(%esp), %esp # back to espfix stack
+ CFI_ADJUST_CFA_OFFSET -24
+diff -urNp linux-2.6.30.8/arch/x86/kernel/entry_64.S linux-2.6.30.8/arch/x86/kernel/entry_64.S
+--- linux-2.6.30.8/arch/x86/kernel/entry_64.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/entry_64.S 2009-08-12 21:15:21.099483377 -0400
+@@ -1073,7 +1073,12 @@ ENTRY(\sym)
+ TRACE_IRQS_OFF
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+- PER_CPU(init_tss, %rbp)
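++	/* init_tss is now a plain array: compute %rbp = &init_tss[cpu] */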
++#ifdef CONFIG_SMP
++ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
++ lea init_tss(%rbp), %rbp
++#else
++ lea init_tss(%rip), %rbp
++#endif
+ subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
+ call \do_sym
+ addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
+diff -urNp linux-2.6.30.8/arch/x86/kernel/ftrace.c linux-2.6.30.8/arch/x86/kernel/ftrace.c
+--- linux-2.6.30.8/arch/x86/kernel/ftrace.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/ftrace.c 2009-07-30 09:48:09.945662533 -0400
+@@ -284,9 +284,9 @@ int ftrace_update_ftrace_func(ftrace_fun
+ unsigned char old[MCOUNT_INSN_SIZE], *new;
+ int ret;
+
+- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
++ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, (unsigned long)func);
+- ret = ftrace_modify_code(ip, old, new);
++ ret = ftrace_modify_code(ktla_ktva(ip), old, new);
+
+ return ret;
+ }
+diff -urNp linux-2.6.30.8/arch/x86/kernel/head32.c linux-2.6.30.8/arch/x86/kernel/head32.c
+--- linux-2.6.30.8/arch/x86/kernel/head32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/head32.c 2009-07-30 09:48:09.946846946 -0400
+@@ -13,12 +13,13 @@
+ #include <asm/e820.h>
+ #include <asm/bios_ebda.h>
+ #include <asm/trampoline.h>
++#include <asm/boot.h>
+
+ void __init i386_start_kernel(void)
+ {
+ reserve_trampoline_memory();
+
+- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
++ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ /* Reserve INITRD */
+diff -urNp linux-2.6.30.8/arch/x86/kernel/head_32.S linux-2.6.30.8/arch/x86/kernel/head_32.S
+--- linux-2.6.30.8/arch/x86/kernel/head_32.S 2009-09-26 23:07:15.401011409 -0400
++++ linux-2.6.30.8/arch/x86/kernel/head_32.S 2009-09-26 23:07:52.470809980 -0400
+@@ -20,6 +20,7 @@
+ #include <asm/setup.h>
+ #include <asm/processor-flags.h>
+ #include <asm/percpu.h>
++#include <asm/msr-index.h>
+
+ /* Physical address */
+ #define pa(X) ((X) - __PAGE_OFFSET)
+@@ -53,11 +54,7 @@
+ * and smaller than max_low_pfn, otherwise it will waste some page table entries
+ */
+
+-#if PTRS_PER_PMD > 1
+-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
+-#else
+-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+-#endif
++#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
+
+ /* Enough space to fit pagetables for the low memory linear map */
+ MAPPING_BEYOND_END = \
+@@ -74,6 +71,15 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
+ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+
+ /*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++.section .text.startup,"ax",@progbits
++ ljmp $(__BOOT_CS),$phys_startup_32
++
++/*
+ * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
+ * %esi points to the real-mode code as a 32-bit pointer.
+ * CS and DS must be 4 GB flat segments, but we don't depend on
+@@ -81,6 +87,12 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+ * can.
+ */
+ .section .text.head,"ax",@progbits
++
++#ifdef CONFIG_PAX_KERNEXEC
++/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
++.fill 4096,1,0xcc
++#endif
++
+ ENTRY(startup_32)
+ /* test KEEP_SEGMENTS flag to see if the bootloader is asking
+ us to not reload segments */
+@@ -98,6 +110,48 @@ ENTRY(startup_32)
+ movl %eax,%gs
+ 2:
+
++#ifdef CONFIG_SMP
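++	/* point the boot CPU's __KERNEL_PERCPU descriptor at the per-CPU load area */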
++ movl $pa(cpu_gdt_table),%edi
++ movl $__per_cpu_load,%eax
++ movw %ax,__KERNEL_PERCPU + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_PERCPU + 4(%edi)
++ movb %ah,__KERNEL_PERCPU + 7(%edi)
++ movl $__per_cpu_end - 1,%eax
++ subl $__per_cpu_load,%eax
++ movw %ax,__KERNEL_PERCPU + 0(%edi)
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
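++	/* PaX/UDEREF: make each CPU's __KERNEL_DS expand-down so userland offsets fault */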
++ movl $NR_CPUS,%ecx
++ movl $pa(cpu_gdt_table),%edi
++1:
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
++ addl $PAGE_SIZE_asm,%edi
++ loop 1b
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
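++	/* PaX/KERNEXEC: rebase __BOOT_CS and every __KERNEL_CS by KERNEL_TEXT_OFFSET */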
++ movl $pa(boot_gdt),%edi
++ movl $KERNEL_TEXT_OFFSET,%eax
++ movw %ax,__BOOT_CS + 2(%edi)
++ rorl $16,%eax
++ movb %al,__BOOT_CS + 4(%edi)
++ movb %ah,__BOOT_CS + 7(%edi)
++ rorl $16,%eax
++
++ movl $NR_CPUS,%ecx
++ movl $pa(cpu_gdt_table),%edi
++1:
++ movw %ax,__KERNEL_CS + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_CS + 4(%edi)
++ movb %ah,__KERNEL_CS + 7(%edi)
++ rorl $16,%eax
++ addl $PAGE_SIZE_asm,%edi
++ loop 1b
++#endif
++
+ /*
+ * Clear BSS first so that there are no surprises...
+ */
+@@ -141,9 +195,7 @@ ENTRY(startup_32)
+ cmpl $num_subarch_entries, %eax
+ jae bad_subarch
+
+- movl pa(subarch_entries)(,%eax,4), %eax
+- subl $__PAGE_OFFSET, %eax
+- jmp *%eax
++ jmp *pa(subarch_entries)(,%eax,4)
+
+ bad_subarch:
+ WEAK(lguest_entry)
+@@ -155,9 +207,9 @@ WEAK(xen_entry)
+ __INITDATA
+
+ subarch_entries:
+- .long default_entry /* normal x86/PC */
+- .long lguest_entry /* lguest hypervisor */
+- .long xen_entry /* Xen hypervisor */
++ .long pa(default_entry) /* normal x86/PC */
++ .long pa(lguest_entry) /* lguest hypervisor */
++ .long pa(xen_entry) /* Xen hypervisor */
+ num_subarch_entries = (. - subarch_entries) / 4
+ .previous
+ #endif /* CONFIG_PARAVIRT */
+@@ -218,8 +270,11 @@ default_entry:
+ movl %eax, pa(max_pfn_mapped)
+
+ /* Do early initialization of the fixmap area */
+- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
++#ifdef CONFIG_COMPAT_VDSO
++ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
++#else
++ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
++#endif
+ #else /* Not PAE */
+
+ page_pde_offset = (__PAGE_OFFSET >> 20);
+@@ -249,8 +304,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+ movl %eax, pa(max_pfn_mapped)
+
+ /* Do early initialization of the fixmap area */
+- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(swapper_pg_dir+0xffc)
++#ifdef CONFIG_COMPAT_VDSO
++ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
++#else
++ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
++#endif
+ #endif
+ jmp 3f
+ /*
+@@ -314,13 +372,16 @@ ENTRY(startup_32_smp)
+ jnc 6f
+
+ /* Setup EFER (Extended Feature Enable Register) */
+- movl $0xc0000080, %ecx
++ movl $MSR_EFER, %ecx
+ rdmsr
+
+ btsl $11, %eax
+ /* Make changes effective */
+ wrmsr
+
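++	/* record NX support early: set NX in __supported_pte_mask and flag nx_enabled */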
++ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
++ movl $1,pa(nx_enabled)
++
+ 6:
+
+ /*
+@@ -346,9 +407,7 @@ ENTRY(startup_32_smp)
+
+ #ifdef CONFIG_SMP
+ cmpb $0, ready
+- jz 1f /* Initial CPU cleans BSS */
+- jmp checkCPUtype
+-1:
++ jnz checkCPUtype /* Initial CPU cleans BSS */
+ #endif /* CONFIG_SMP */
+
+ /*
+@@ -426,7 +485,7 @@ is386: movl $2,%ecx # set MP
+ 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
+ movl %eax,%ss # after changing gdt.
+
+- movl $(__USER_DS),%eax # DS/ES contains default USER segment
++# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
+ movl %eax,%ds
+ movl %eax,%es
+
+@@ -440,8 +499,11 @@ is386: movl $2,%ecx # set MP
+ */
+ cmpb $0,ready
+ jne 1f
+- movl $per_cpu__gdt_page,%eax
++ movl $cpu_gdt_table,%eax
+ movl $per_cpu__stack_canary,%ecx
++#ifdef CONFIG_SMP
++ addl $__per_cpu_load,%ecx
++#endif
+ movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+ shrl $16, %ecx
+ movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+@@ -459,10 +521,6 @@ is386: movl $2,%ecx # set MP
+ #ifdef CONFIG_SMP
+ movb ready, %cl
+ movb $1, ready
+- cmpb $0,%cl # the first CPU calls start_kernel
+- je 1f
+- movl (stack_start), %esp
+-1:
+ #endif /* CONFIG_SMP */
+ jmp *(initial_code)
+
+@@ -548,22 +606,22 @@ early_page_fault:
+ jmp early_fault
+
+ early_fault:
+- cld
+ #ifdef CONFIG_PRINTK
++ cmpl $1,%ss:early_recursion_flag
++ je hlt_loop
++ incl %ss:early_recursion_flag
++ cld
+ pusha
+ movl $(__KERNEL_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+- cmpl $2,early_recursion_flag
+- je hlt_loop
+- incl early_recursion_flag
+ movl %cr2,%eax
+ pushl %eax
+ pushl %edx /* trapno */
+ pushl $fault_msg
+ call printk
++; call dump_stack
+ #endif
+- call dump_stack
+ hlt_loop:
+ hlt
+ jmp hlt_loop
+@@ -571,8 +629,11 @@ hlt_loop:
+ /* This is the default interrupt "handler" :-) */
+ ALIGN
+ ignore_int:
+- cld
+ #ifdef CONFIG_PRINTK
++ cmpl $2,%ss:early_recursion_flag
++ je hlt_loop
++ incl %ss:early_recursion_flag
++ cld
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+@@ -581,9 +642,6 @@ ignore_int:
+ movl $(__KERNEL_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+- cmpl $2,early_recursion_flag
+- je hlt_loop
+- incl early_recursion_flag
+ pushl 16(%esp)
+ pushl 24(%esp)
+ pushl 32(%esp)
+@@ -607,37 +665,40 @@ ignore_int:
+ ENTRY(initial_code)
+ .long i386_start_kernel
+
+-.section .text
+-/*
+- * Real beginning of normal "text" segment
+- */
+-ENTRY(stext)
+-ENTRY(_stext)
+-
+ /*
+ * BSS section
+ */
+-.section ".bss.page_aligned","wa"
+- .align PAGE_SIZE_asm
+ #ifdef CONFIG_X86_PAE
++.section .swapper_pg_pmd,"a",@progbits
+ swapper_pg_pmd:
+ .fill 1024*KPMDS,4,0
+ #else
++.section .swapper_pg_dir,"a",@progbits
+ ENTRY(swapper_pg_dir)
+ .fill 1024,4,0
+ #endif
++
+ swapper_pg_fixmap:
+ .fill 1024,4,0
++
++.section .empty_zero_page,"a",@progbits
+ ENTRY(empty_zero_page)
+ .fill 4096,1,0
+
+ /*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround. We have a special link segment
++ * for this.
++ */
++.section .idt,"a",@progbits
++ENTRY(idt_table)
++ .fill 256,8,0
++
++/*
+ * This starts the data section.
+ */
+ #ifdef CONFIG_X86_PAE
+-.section ".data.page_aligned","wa"
+- /* Page-aligned for the benefit of paravirt? */
+- .align PAGE_SIZE_asm
++.section .swapper_pg_dir,"a",@progbits
+ ENTRY(swapper_pg_dir)
+ .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
+ # if KPMDS == 3
+@@ -660,11 +721,12 @@ ENTRY(swapper_pg_dir)
+
+ .data
+ ENTRY(stack_start)
+- .long init_thread_union+THREAD_SIZE
++ .long init_thread_union+THREAD_SIZE-8
+ .long __BOOT_DS
+
+ ready: .byte 0
+
++.section .rodata,"a",@progbits
+ early_recursion_flag:
+ .long 0
+
+@@ -700,7 +762,7 @@ fault_msg:
+ .word 0 # 32 bit align gdt_desc.address
+ boot_gdt_descr:
+ .word __BOOT_DS+7
+- .long boot_gdt - __PAGE_OFFSET
++ .long pa(boot_gdt)
+
+ .word 0 # 32-bit align idt_desc.address
+ idt_descr:
+@@ -711,7 +773,7 @@ idt_descr:
+ .word 0 # 32 bit align gdt_desc.address
+ ENTRY(early_gdt_descr)
+ .word GDT_ENTRIES*8-1
+- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
++ .long cpu_gdt_table /* Overwritten for secondary CPUs */
+
+ /*
+ * The boot_gdt must mirror the equivalent in setup.S and is
+@@ -720,5 +782,59 @@ ENTRY(early_gdt_descr)
+ .align L1_CACHE_BYTES
+ ENTRY(boot_gdt)
+ .fill GDT_ENTRY_BOOT_CS,8,0
+- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
+- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
++ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
++
++ .align PAGE_SIZE_asm
++ENTRY(cpu_gdt_table)
++ .rept NR_CPUS
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0000000000000000 /* 0x0b reserved */
++ .quad 0x0000000000000000 /* 0x13 reserved */
++ .quad 0x0000000000000000 /* 0x1b reserved */
++ .quad 0x0000000000000000 /* 0x20 unused */
++ .quad 0x0000000000000000 /* 0x28 unused */
++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
++ .quad 0x0000000000000000 /* 0x4b reserved */
++ .quad 0x0000000000000000 /* 0x53 reserved */
++ .quad 0x0000000000000000 /* 0x5b reserved */
++
++ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
++
++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
++
++ /*
++ * Segments used for calling PnP BIOS have byte granularity.
++ * The code segments and data segments have fixed 64k limits,
++ * the transfer segment sizes are set at run time.
++ */
++ .quad 0x00409b000000ffff /* 0x90 32-bit code */
++ .quad 0x00009b000000ffff /* 0x98 16-bit code */
++ .quad 0x000093000000ffff /* 0xa0 16-bit data */
++ .quad 0x0000930000000000 /* 0xa8 16-bit data */
++ .quad 0x0000930000000000 /* 0xb0 16-bit data */
++
++ /*
++ * The APM segments have byte granularity and their bases
++ * are set at run time. All have 64k limits.
++ */
++ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
++ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x004093000000ffff /* 0xc8 APM DS data */
++
++ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
++ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
++ .quad 0x0040930000000018 /* 0xe0 - STACK_CANARY */
++ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
++ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
++
++ /* Be sure this is zeroed to avoid false validations in Xen */
++ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
++ .endr
+diff -urNp linux-2.6.30.8/arch/x86/kernel/head_64.S linux-2.6.30.8/arch/x86/kernel/head_64.S
+--- linux-2.6.30.8/arch/x86/kernel/head_64.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/head_64.S 2009-09-05 22:09:36.168173047 -0400
+@@ -39,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
+ L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
+ L4_START_KERNEL = pgd_index(__START_KERNEL_map)
+ L3_START_KERNEL = pud_index(__START_KERNEL_map)
++L4_VMALLOC_START = pgd_index(VMALLOC_START)
++L3_VMALLOC_START = pud_index(VMALLOC_START)
++L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
++L3_VMEMMAP_START = pud_index(VMEMMAP_START)
+
+ .text
+ .section .text.head
+@@ -86,35 +90,22 @@ startup_64:
+ */
+ addq %rbp, init_level4_pgt + 0(%rip)
+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
+
+ addq %rbp, level3_ident_pgt + 0(%rip)
++ addq %rbp, level3_ident_pgt + 8(%rip)
++ addq %rbp, level3_ident_pgt + 16(%rip)
++ addq %rbp, level3_ident_pgt + 24(%rip)
+
+- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
+- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
++ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
+
+- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
+
+- /* Add an Identity mapping if I am above 1G */
+- leaq _text(%rip), %rdi
+- andq $PMD_PAGE_MASK, %rdi
+-
+- movq %rdi, %rax
+- shrq $PUD_SHIFT, %rax
+- andq $(PTRS_PER_PUD - 1), %rax
+- jz ident_complete
+-
+- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
+- leaq level3_ident_pgt(%rip), %rbx
+- movq %rdx, 0(%rbx, %rax, 8)
+-
+- movq %rdi, %rax
+- shrq $PMD_SHIFT, %rax
+- andq $(PTRS_PER_PMD - 1), %rax
+- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
+- leaq level2_spare_pgt(%rip), %rbx
+- movq %rdx, 0(%rbx, %rax, 8)
+-ident_complete:
++ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
++ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
+
+ /*
+ * Fixup the kernel text+data virtual addresses. Note that
+@@ -188,6 +179,10 @@ ENTRY(secondary_startup_64)
+ btl $20,%edi /* No Execute supported? */
+ jnc 1f
+ btsl $_EFER_NX, %eax
++ leaq init_level4_pgt(%rip), %rdi
++ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
++ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
++ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
+ 1: wrmsr /* Make changes effective */
+
+ /* Setup cr0 */
+@@ -263,16 +258,16 @@ ENTRY(secondary_startup_64)
+ .quad x86_64_start_kernel
+ ENTRY(initial_gs)
+ .quad INIT_PER_CPU_VAR(irq_stack_union)
+- __FINITDATA
+
+ ENTRY(stack_start)
+ .quad init_thread_union+THREAD_SIZE-8
+ .word 0
++ __FINITDATA
+
+ bad_address:
+ jmp bad_address
+
+- .section ".init.text","ax"
++ __INIT
+ #ifdef CONFIG_EARLY_PRINTK
+ .globl early_idt_handlers
+ early_idt_handlers:
+@@ -317,18 +312,23 @@ ENTRY(early_idt_handler)
+ #endif /* EARLY_PRINTK */
+ 1: hlt
+ jmp 1b
++ .previous
+
+ #ifdef CONFIG_EARLY_PRINTK
++ __INITDATA
+ early_recursion_flag:
+ .long 0
++ .previous
+
++ .section .rodata,"a",@progbits
+ early_idt_msg:
+ .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
+ early_idt_ripmsg:
+ .asciz "RIP %s\n"
+-#endif /* CONFIG_EARLY_PRINTK */
+ .previous
++#endif /* CONFIG_EARLY_PRINTK */
+
++ .section .rodata,"a",@progbits
+ #define NEXT_PAGE(name) \
+ .balign PAGE_SIZE; \
+ ENTRY(name)
+@@ -351,13 +351,27 @@ NEXT_PAGE(init_level4_pgt)
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMALLOC_START*8, 0
++ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
++ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_START_KERNEL*8, 0
+ /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+
+ NEXT_PAGE(level3_ident_pgt)
+ .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+- .fill 511,8,0
++ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
++ .quad level2_ident_pgt + 2*PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
++ .quad level2_ident_pgt + 3*PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
++ .fill 508,8,0
++
++NEXT_PAGE(level3_vmalloc_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level3_vmemmap_pgt)
++ .fill L3_VMEMMAP_START,8,0
++ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+
+ NEXT_PAGE(level3_kernel_pgt)
+ .fill L3_START_KERNEL,8,0
+@@ -365,20 +379,23 @@ NEXT_PAGE(level3_kernel_pgt)
+ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+
++NEXT_PAGE(level2_vmemmap_pgt)
++ .fill 512,8,0
++
+ NEXT_PAGE(level2_fixmap_pgt)
+- .fill 506,8,0
+- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
+- .fill 5,8,0
++ .fill 507,8,0
++ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
++ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
++ .fill 4,8,0
+
+-NEXT_PAGE(level1_fixmap_pgt)
++NEXT_PAGE(level1_vsyscall_pgt)
+ .fill 512,8,0
+
+-NEXT_PAGE(level2_ident_pgt)
+- /* Since I easily can, map the first 1G.
++ /* Since I easily can, map the first 4G.
+ * Don't set NX because code runs from these pages.
+ */
+- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
++NEXT_PAGE(level2_ident_pgt)
++ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 4*PTRS_PER_PMD)
+
+ NEXT_PAGE(level2_kernel_pgt)
+ /*
+@@ -391,33 +408,49 @@ NEXT_PAGE(level2_kernel_pgt)
+ * If you want to increase this then increase MODULES_VADDR
+ * too.)
+ */
+- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
+- KERNEL_IMAGE_SIZE/PMD_SIZE)
+-
+-NEXT_PAGE(level2_spare_pgt)
+- .fill 512, 8, 0
++ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
+
+ #undef PMDS
+ #undef NEXT_PAGE
+
+- .data
++ .align PAGE_SIZE
++ENTRY(cpu_gdt_table)
++ .rept NR_CPUS
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
++ .quad 0x00cffb000000ffff /* __USER32_CS */
++ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
++ .quad 0x00affb000000ffff /* __USER_CS */
++ .quad 0x0 /* unused */
++ .quad 0,0 /* TSS */
++ .quad 0,0 /* LDT */
++ .quad 0,0,0 /* three TLS descriptors */
++ .quad 0x0000f40000000000 /* node/CPU stored in limit */
++ /* asm/segment.h:GDT_ENTRIES must match this */
++
++ /* zero the remaining page */
++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++ .endr
++
+ .align 16
+ .globl early_gdt_descr
+ early_gdt_descr:
+ .word GDT_ENTRIES*8-1
+ early_gdt_descr_base:
+- .quad INIT_PER_CPU_VAR(gdt_page)
++ .quad cpu_gdt_table
+
+ ENTRY(phys_base)
+ /* This must match the first entry in level2_kernel_pgt */
+ .quad 0x0000000000000000
+
+ #include "../../x86/xen/xen-head.S"
+-
+- .section .bss, "aw", @nobits
++
++ .section .rodata,"a",@progbits
+ .align L1_CACHE_BYTES
+ ENTRY(idt_table)
+- .skip IDT_ENTRIES * 16
++ .fill 512,8,0
+
+ .section .bss.page_aligned, "aw", @nobits
+ .align PAGE_SIZE
+diff -urNp linux-2.6.30.8/arch/x86/kernel/i386_ksyms_32.c linux-2.6.30.8/arch/x86/kernel/i386_ksyms_32.c
+--- linux-2.6.30.8/arch/x86/kernel/i386_ksyms_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/i386_ksyms_32.c 2009-07-30 09:48:09.948476455 -0400
+@@ -10,8 +10,12 @@
+ EXPORT_SYMBOL(mcount);
+ #endif
+
++EXPORT_SYMBOL_GPL(cpu_gdt_table);
++
+ /* Networking helper routines. */
+ EXPORT_SYMBOL(csum_partial_copy_generic);
++EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
++EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
+
+ EXPORT_SYMBOL(__get_user_1);
+ EXPORT_SYMBOL(__get_user_2);
+@@ -26,3 +30,7 @@ EXPORT_SYMBOL(strstr);
+
+ EXPORT_SYMBOL(csum_partial);
+ EXPORT_SYMBOL(empty_zero_page);
++
++#ifdef CONFIG_PAX_KERNEXEC
++EXPORT_SYMBOL(KERNEL_TEXT_OFFSET);
++#endif
+diff -urNp linux-2.6.30.8/arch/x86/kernel/init_task.c linux-2.6.30.8/arch/x86/kernel/init_task.c
+--- linux-2.6.30.8/arch/x86/kernel/init_task.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/init_task.c 2009-07-30 09:48:09.948476455 -0400
+@@ -40,5 +40,5 @@ EXPORT_SYMBOL(init_task);
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+-
++struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
++EXPORT_SYMBOL(init_tss);
+diff -urNp linux-2.6.30.8/arch/x86/kernel/ioport.c linux-2.6.30.8/arch/x86/kernel/ioport.c
+--- linux-2.6.30.8/arch/x86/kernel/ioport.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/ioport.c 2009-07-30 11:10:48.918448854 -0400
+@@ -6,6 +6,7 @@
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/ioport.h>
+@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
+
+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
+ return -EINVAL;
++#ifdef CONFIG_GRKERNSEC_IO
++ if (turn_on) {
++ gr_handle_ioperm();
++ return -EPERM;
++ }
++#endif
+ if (turn_on && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
+ * because the ->io_bitmap_max value must match the bitmap
+ * contents:
+ */
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+
+ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
+
+@@ -111,8 +118,13 @@ static int do_iopl(unsigned int level, s
+ return -EINVAL;
+ /* Trying to gain more privileges? */
+ if (level > old) {
++#ifdef CONFIG_GRKERNSEC_IO
++ gr_handle_iopl();
++ return -EPERM;
++#else
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
++#endif
+ }
+ regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
+
+diff -urNp linux-2.6.30.8/arch/x86/kernel/irq_32.c linux-2.6.30.8/arch/x86/kernel/irq_32.c
+--- linux-2.6.30.8/arch/x86/kernel/irq_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/irq_32.c 2009-07-30 09:48:09.948476455 -0400
+@@ -94,7 +94,7 @@ execute_on_irq_stack(int overflow, struc
+ return 0;
+
+ /* build the stack frame on the IRQ stack */
+- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
+ irqctx->tinfo.task = curctx->tinfo.task;
+ irqctx->tinfo.previous_esp = current_stack_pointer;
+
+@@ -175,7 +175,7 @@ asmlinkage void do_softirq(void)
+ irqctx->tinfo.previous_esp = current_stack_pointer;
+
+ /* build the stack frame on the softirq stack */
+- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
+
+ call_on_stack(__do_softirq, isp);
+ /*
+diff -urNp linux-2.6.30.8/arch/x86/kernel/kprobes.c linux-2.6.30.8/arch/x86/kernel/kprobes.c
+--- linux-2.6.30.8/arch/x86/kernel/kprobes.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/kprobes.c 2009-07-30 09:48:09.948476455 -0400
+@@ -166,9 +166,24 @@ static void __kprobes set_jmp_op(void *f
+ char op;
+ s32 raddr;
+ } __attribute__((packed)) * jop;
+- jop = (struct __arch_jmp_op *)from;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
++ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
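++	/* patch the jump through the ktla_ktva view of the text address */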
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
+ jop->op = RELATIVEJUMP_INSTRUCTION;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ }
+
+ /*
+@@ -345,16 +360,29 @@ static void __kprobes fix_riprel(struct
+
+ static void __kprobes arch_copy_kprobe(struct kprobe *p)
+ {
+- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
++ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
+
+ fix_riprel(p);
+
+- if (can_boost(p->addr))
++ if (can_boost(ktla_ktva(p->addr)))
+ p->ainsn.boostable = 0;
+ else
+ p->ainsn.boostable = -1;
+
+- p->opcode = *p->addr;
++ p->opcode = *(ktla_ktva(p->addr));
+ }
+
+ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+@@ -432,7 +460,7 @@ static void __kprobes prepare_singlestep
+ if (p->opcode == BREAKPOINT_INSTRUCTION)
+ regs->ip = (unsigned long)p->addr;
+ else
+- regs->ip = (unsigned long)p->ainsn.insn;
++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ }
+
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+@@ -453,7 +481,7 @@ static void __kprobes setup_singlestep(s
+ if (p->ainsn.boostable == 1 && !p->post_handler) {
+ /* Boost up -- we can execute copied instructions directly */
+ reset_current_kprobe();
+- regs->ip = (unsigned long)p->ainsn.insn;
++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ preempt_enable_no_resched();
+ return;
+ }
+@@ -523,7 +551,7 @@ static int __kprobes kprobe_handler(stru
+ struct kprobe_ctlblk *kcb;
+
+ addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
+- if (*addr != BREAKPOINT_INSTRUCTION) {
++ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+@@ -775,7 +803,7 @@ static void __kprobes resume_execution(s
+ struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+ {
+ unsigned long *tos = stack_addr(regs);
+- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
++ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ unsigned long orig_ip = (unsigned long)p->addr;
+ kprobe_opcode_t *insn = p->ainsn.insn;
+
+@@ -958,7 +986,7 @@ int __kprobes kprobe_exceptions_notify(s
+ struct die_args *args = data;
+ int ret = NOTIFY_DONE;
+
+- if (args->regs && user_mode_vm(args->regs))
++ if (args->regs && user_mode(args->regs))
+ return ret;
+
+ switch (val) {
+diff -urNp linux-2.6.30.8/arch/x86/kernel/ldt.c linux-2.6.30.8/arch/x86/kernel/ldt.c
+--- linux-2.6.30.8/arch/x86/kernel/ldt.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/ldt.c 2009-07-30 09:48:09.950015875 -0400
+@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
+ if (reload) {
+ #ifdef CONFIG_SMP
+ preempt_disable();
+- load_LDT(pc);
++ load_LDT_nolock(pc);
+ if (!cpus_equal(current->mm->cpu_vm_mask,
+ cpumask_of_cpu(smp_processor_id())))
+ smp_call_function(flush_ldt, current->mm, 1);
+ preempt_enable();
+ #else
+- load_LDT(pc);
++ load_LDT_nolock(pc);
+ #endif
+ }
+ if (oldsize) {
+@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
+ return err;
+
+ for (i = 0; i < old->size; i++)
+- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
++ write_ldt_entry(new->ldt, i, old->ldt + i);
+ return 0;
+ }
+
+@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
+ retval = copy_ldt(&mm->context, &old_mm->context);
+ mutex_unlock(&old_mm->context.lock);
+ }
++
++ if (tsk == current) {
++ mm->context.vdso = ~0UL;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ mm->context.user_cs_base = 0UL;
++ mm->context.user_cs_limit = ~0UL;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpus_clear(mm->context.cpu_user_cs_mask);
++#endif
++
++#endif
++#endif
++
++ }
++
+ return retval;
+ }
+
+@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
++ error = -EINVAL;
++ goto out_unlock;
++ }
++#endif
++
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
+diff -urNp linux-2.6.30.8/arch/x86/kernel/machine_kexec_32.c linux-2.6.30.8/arch/x86/kernel/machine_kexec_32.c
+--- linux-2.6.30.8/arch/x86/kernel/machine_kexec_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/machine_kexec_32.c 2009-07-30 09:48:09.950015875 -0400
+@@ -26,7 +26,7 @@
+ #include <asm/system.h>
+ #include <asm/cacheflush.h>
+
+-static void set_idt(void *newidt, __u16 limit)
++static void set_idt(struct desc_struct *newidt, __u16 limit)
+ {
+ struct desc_ptr curidt;
+
+@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
+ }
+
+
+-static void set_gdt(void *newgdt, __u16 limit)
++static void set_gdt(struct desc_struct *newgdt, __u16 limit)
+ {
+ struct desc_ptr curgdt;
+
+@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
+ }
+
+ control_page = page_address(image->control_code_page);
+- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
++ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
+
+ relocate_kernel_ptr = control_page;
+ page_list[PA_CONTROL_PAGE] = __pa(control_page);
+diff -urNp linux-2.6.30.8/arch/x86/kernel/module_32.c linux-2.6.30.8/arch/x86/kernel/module_32.c
+--- linux-2.6.30.8/arch/x86/kernel/module_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/module_32.c 2009-08-01 15:35:35.138919235 -0400
+@@ -23,6 +23,9 @@
+ #include <linux/kernel.h>
+ #include <linux/bug.h>
+
++#include <asm/desc.h>
++#include <asm/pgtable.h>
++
+ #if 0
+ #define DEBUGP printk
+ #else
+@@ -33,9 +36,31 @@ void *module_alloc(unsigned long size)
+ {
+ if (size == 0)
+ return NULL;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
++#else
+ return vmalloc_exec(size);
++#endif
++
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++void *module_alloc_exec(unsigned long size)
++{
++ struct vm_struct *area;
++
++ if (size == 0)
++ return NULL;
++
++ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_VADDR, (unsigned long)&MODULES_END);
++ if (area)
++ return area->addr;
++
++ return NULL;
++}
++EXPORT_SYMBOL(module_alloc_exec);
++#endif
+
+ /* Free memory returned from module_alloc */
+ void module_free(struct module *mod, void *module_region)
+@@ -45,6 +70,46 @@ void module_free(struct module *mod, voi
+ table entries. */
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++void module_free_exec(struct module *mod, void *module_region)
++{
++ struct vm_struct **p, *tmp;
++
++ if (!module_region)
++ return;
++
++ if ((PAGE_SIZE-1) & (unsigned long)module_region) {
++ printk(KERN_ERR "Trying to module_free_exec() bad address (%p)\n", module_region);
++ WARN_ON(1);
++ return;
++ }
++
++ write_lock(&vmlist_lock);
++ for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next)
++ if (tmp->addr == module_region)
++ break;
++
++ if (tmp) {
++ unsigned long cr0;
++
++ pax_open_kernel(cr0);
++ memset(tmp->addr, 0xCC, tmp->size);
++ pax_close_kernel(cr0);
++
++ *p = tmp->next;
++ kfree(tmp);
++ }
++ write_unlock(&vmlist_lock);
++
++ if (!tmp) {
++ printk(KERN_ERR "Trying to module_free_exec() nonexistent vm area (%p)\n",
++ module_region);
++ WARN_ON(1);
++ }
++}
++EXPORT_SYMBOL(module_free_exec);
++#endif
++
+ /* We don't need anything special. */
+ int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+@@ -63,14 +128,20 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ unsigned int i;
+ Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+- uint32_t *location;
++ uint32_t *plocation, location;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+- + rel[i].r_offset;
++ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
++ location = (uint32_t)plocation;
++ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
++ plocation = ktla_ktva((void *)plocation);
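++		/* exec sections are patched through their ktla_ktva alias; 'location' keeps the run-time address for the relocation math */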
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+@@ -78,12 +149,32 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_386_32:
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ /* We add the value into the location given */
+- *location += sym->st_value;
++ *plocation += sym->st_value;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ break;
+ case R_386_PC32:
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ 			/* Add the value, subtract its position */
+- *location += sym->st_value - (uint32_t)location;
++ *plocation += sym->st_value - location;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+diff -urNp linux-2.6.30.8/arch/x86/kernel/module_64.c linux-2.6.30.8/arch/x86/kernel/module_64.c
+--- linux-2.6.30.8/arch/x86/kernel/module_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/module_64.c 2009-08-01 15:35:35.161871747 -0400
+@@ -40,7 +40,7 @@ void module_free(struct module *mod, voi
+ table entries. */
+ }
+
+-void *module_alloc(unsigned long size)
++static void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
+ struct vm_struct *area;
+
+@@ -54,8 +54,33 @@ void *module_alloc(unsigned long size)
+ if (!area)
+ return NULL;
+
+- return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
++ return __vmalloc_area(area, GFP_KERNEL | __GFP_ZERO, prot);
++}
++
++#ifdef CONFIG_PAX_KERNEXEC
++void *module_alloc(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL);
++}
++
++void module_free_exec(struct module *mod, void *module_region)
++{
++ module_free(mod, module_region);
++}
++EXPORT_SYMBOL(module_free_exec);
++
++void *module_alloc_exec(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL_RX);
+ }
++EXPORT_SYMBOL(module_alloc_exec);
++#else
++void *module_alloc(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL_EXEC);
++}
++#endif
++
+ #endif
+
+ /* We don't need anything special. */
+@@ -79,6 +104,10 @@ int apply_relocate_add(Elf64_Shdr *sechd
+ void *loc;
+ u64 val;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+@@ -101,21 +130,61 @@ int apply_relocate_add(Elf64_Shdr *sechd
+ case R_X86_64_NONE:
+ break;
+ case R_X86_64_64:
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ *(u64 *)loc = val;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ break;
+ case R_X86_64_32:
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ *(u32 *)loc = val;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ if (val != *(u32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_32S:
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ *(s32 *)loc = val;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_PC32:
+ val -= (u64)loc;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ *(u32 *)loc = val;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ #if 0
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+diff -urNp linux-2.6.30.8/arch/x86/kernel/paravirt.c linux-2.6.30.8/arch/x86/kernel/paravirt.c
+--- linux-2.6.30.8/arch/x86/kernel/paravirt.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/paravirt.c 2009-07-30 09:48:09.950702241 -0400
+@@ -54,7 +54,7 @@ u64 _paravirt_ident_64(u64 x)
+ return x;
+ }
+
+-static void __init default_banner(void)
++static void default_banner(void)
+ {
+ printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
+ pv_info.name);
+@@ -183,7 +183,7 @@ unsigned paravirt_patch_insns(void *insn
+ if (insn_len > len || start == NULL)
+ insn_len = len;
+ else
+- memcpy(insnbuf, start, insn_len);
++ memcpy(insnbuf, ktla_ktva(start), insn_len);
+
+ return insn_len;
+ }
+@@ -313,21 +313,21 @@ void arch_flush_lazy_cpu_mode(void)
+ preempt_enable();
+ }
+
+-struct pv_info pv_info = {
++struct pv_info pv_info __read_only = {
+ .name = "bare hardware",
+ .paravirt_enabled = 0,
+ .kernel_rpl = 0,
+ .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
+ };
+
+-struct pv_init_ops pv_init_ops = {
++struct pv_init_ops pv_init_ops __read_only = {
+ .patch = native_patch,
+ .banner = default_banner,
+ .arch_setup = paravirt_nop,
+ .memory_setup = machine_specific_memory_setup,
+ };
+
+-struct pv_time_ops pv_time_ops = {
++struct pv_time_ops pv_time_ops __read_only = {
+ .time_init = hpet_time_init,
+ .get_wallclock = native_get_wallclock,
+ .set_wallclock = native_set_wallclock,
+@@ -335,7 +335,7 @@ struct pv_time_ops pv_time_ops = {
+ .get_tsc_khz = native_calibrate_tsc,
+ };
+
+-struct pv_irq_ops pv_irq_ops = {
++struct pv_irq_ops pv_irq_ops __read_only = {
+ .init_IRQ = native_init_IRQ,
+ .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+ .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+@@ -348,7 +348,7 @@ struct pv_irq_ops pv_irq_ops = {
+ #endif
+ };
+
+-struct pv_cpu_ops pv_cpu_ops = {
++struct pv_cpu_ops pv_cpu_ops __read_only = {
+ .cpuid = native_cpuid,
+ .get_debugreg = native_get_debugreg,
+ .set_debugreg = native_set_debugreg,
+@@ -410,7 +410,7 @@ struct pv_cpu_ops pv_cpu_ops = {
+ },
+ };
+
+-struct pv_apic_ops pv_apic_ops = {
++struct pv_apic_ops pv_apic_ops __read_only = {
+ #ifdef CONFIG_X86_LOCAL_APIC
+ .setup_boot_clock = setup_boot_APIC_clock,
+ .setup_secondary_clock = setup_secondary_APIC_clock,
+@@ -426,7 +426,7 @@ struct pv_apic_ops pv_apic_ops = {
+ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+ #endif
+
+-struct pv_mmu_ops pv_mmu_ops = {
++struct pv_mmu_ops pv_mmu_ops __read_only = {
+ #ifndef CONFIG_X86_64
+ .pagetable_setup_start = native_pagetable_setup_start,
+ .pagetable_setup_done = native_pagetable_setup_done,
+diff -urNp linux-2.6.30.8/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.30.8/arch/x86/kernel/paravirt-spinlocks.c
+--- linux-2.6.30.8/arch/x86/kernel/paravirt-spinlocks.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/paravirt-spinlocks.c 2009-07-30 09:48:09.950702241 -0400
+@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
+ __raw_spin_lock(lock);
+ }
+
+-struct pv_lock_ops pv_lock_ops = {
++struct pv_lock_ops pv_lock_ops __read_only = {
+ #ifdef CONFIG_SMP
+ .spin_is_locked = __ticket_spin_is_locked,
+ .spin_is_contended = __ticket_spin_is_contended,
+diff -urNp linux-2.6.30.8/arch/x86/kernel/process_32.c linux-2.6.30.8/arch/x86/kernel/process_32.c
+--- linux-2.6.30.8/arch/x86/kernel/process_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/process_32.c 2009-07-30 09:48:09.951950745 -0400
+@@ -73,6 +73,7 @@ EXPORT_PER_CPU_SYMBOL(current_task);
+ unsigned long thread_saved_pc(struct task_struct *tsk)
+ {
+ return ((unsigned long *)tsk->thread.sp)[3];
++//XXX return tsk->thread.eip;
+ }
+
+ #ifndef CONFIG_SMP
+@@ -135,7 +136,7 @@ void __show_regs(struct pt_regs *regs, i
+ unsigned short ss, gs;
+ const char *board;
+
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ sp = regs->sp;
+ ss = regs->ss & 0xffff;
+ gs = get_user_gs(regs);
+@@ -216,8 +217,8 @@ int kernel_thread(int (*fn)(void *), voi
+ regs.bx = (unsigned long) fn;
+ regs.dx = (unsigned long) arg;
+
+- regs.ds = __USER_DS;
+- regs.es = __USER_DS;
++ regs.ds = __KERNEL_DS;
++ regs.es = __KERNEL_DS;
+ regs.fs = __KERNEL_PERCPU;
+ regs.gs = __KERNEL_STACK_CANARY;
+ regs.orig_ax = -1;
+@@ -253,7 +254,7 @@ int copy_thread(unsigned long clone_flag
+ struct task_struct *tsk;
+ int err;
+
+- childregs = task_pt_regs(p);
++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
+ *childregs = *regs;
+ childregs->ax = 0;
+ childregs->sp = sp;
+@@ -282,6 +283,7 @@ int copy_thread(unsigned long clone_flag
+ * Set a new TLS for the child thread?
+ */
+ if (clone_flags & CLONE_SETTLS)
++//XXX needs set_fs()?
+ err = do_set_thread_area(p, -1,
+ (struct user_desc __user *)childregs->si, 0);
+
+@@ -351,7 +353,7 @@ __switch_to(struct task_struct *prev_p,
+ struct thread_struct *prev = &prev_p->thread,
+ *next = &next_p->thread;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+@@ -379,6 +381,11 @@ __switch_to(struct task_struct *prev_p,
+ */
+ lazy_save_gs(prev->gs);
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!segment_eq(task_thread_info(prev_p)->addr_limit, task_thread_info(next_p)->addr_limit))
++ __set_fs(task_thread_info(next_p)->addr_limit, cpu);
++#endif
++
+ /*
+ * Load the per-thread Thread-Local Storage descriptor.
+ */
+@@ -497,15 +504,27 @@ unsigned long get_wchan(struct task_stru
+ return 0;
+ }
+
+-unsigned long arch_align_stack(unsigned long sp)
++#ifdef CONFIG_PAX_RANDKSTACK
++asmlinkage void pax_randomize_kstack(void)
+ {
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
++ struct thread_struct *thread = &current->thread;
++ unsigned long time;
+
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
++ if (!randomize_va_space)
++ return;
++
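++	/* the TSC low bits serve as a cheap entropy source for the kernel stack base */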
++ rdtscl(time);
++
++ /* P4 seems to return a 0 LSB, ignore it */
++#ifdef CONFIG_MPENTIUM4
++ time &= 0x1EUL;
++ time <<= 2;
++#else
++ time &= 0xFUL;
++ time <<= 3;
++#endif
++
++ thread->sp0 ^= time;
++ load_sp0(init_tss + smp_processor_id(), thread);
+ }
++#endif
+diff -urNp linux-2.6.30.8/arch/x86/kernel/process_64.c linux-2.6.30.8/arch/x86/kernel/process_64.c
+--- linux-2.6.30.8/arch/x86/kernel/process_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/process_64.c 2009-07-30 09:48:09.951950745 -0400
+@@ -97,7 +97,7 @@ static void __exit_idle(void)
+ void exit_idle(void)
+ {
+ /* idle loop has pid 0 */
+- if (current->pid)
++ if (task_pid_nr(current))
+ return;
+ __exit_idle();
+ }
+@@ -176,7 +176,7 @@ void __show_regs(struct pt_regs *regs, i
+ if (!board)
+ board = "";
+ printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version, board);
+@@ -386,7 +386,7 @@ __switch_to(struct task_struct *prev_p,
+ struct thread_struct *prev = &prev_p->thread;
+ struct thread_struct *next = &next_p->thread;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+ unsigned fsindex, gsindex;
+
+ /* we're going to use this soon, after a few expensive things */
+@@ -545,12 +545,11 @@ unsigned long get_wchan(struct task_stru
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+ stack = (unsigned long)task_stack_page(p);
+- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
++ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-8-sizeof(u64))
+ return 0;
+ fp = *(u64 *)(p->thread.sp);
+ do {
+- if (fp < (unsigned long)stack ||
+- fp >= (unsigned long)stack+THREAD_SIZE)
++ if (fp < stack || fp > stack+THREAD_SIZE-8-sizeof(u64))
+ return 0;
+ ip = *(u64 *)(fp+8);
+ if (!in_sched_functions(ip))
+@@ -659,16 +658,3 @@ long sys_arch_prctl(int code, unsigned l
+ {
+ return do_arch_prctl(current, code, addr);
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
+diff -urNp linux-2.6.30.8/arch/x86/kernel/process.c linux-2.6.30.8/arch/x86/kernel/process.c
+--- linux-2.6.30.8/arch/x86/kernel/process.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/process.c 2009-08-05 19:08:00.495411211 -0400
+@@ -71,7 +71,7 @@ void exit_thread(void)
+ unsigned long *bp = t->io_bitmap_ptr;
+
+ if (bp) {
+- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++ struct tss_struct *tss = init_tss + get_cpu();
+
+ t->io_bitmap_ptr = NULL;
+ clear_thread_flag(TIF_IO_BITMAP);
+@@ -105,6 +105,9 @@ void flush_thread(void)
+
+ clear_tsk_thread_flag(tsk, TIF_DEBUG);
+
++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR)
++ loadsegment(gs, 0);
++#endif
+ tsk->thread.debugreg0 = 0;
+ tsk->thread.debugreg1 = 0;
+ tsk->thread.debugreg2 = 0;
+diff -urNp linux-2.6.30.8/arch/x86/kernel/ptrace.c linux-2.6.30.8/arch/x86/kernel/ptrace.c
+--- linux-2.6.30.8/arch/x86/kernel/ptrace.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/ptrace.c 2009-07-30 09:48:09.952643339 -0400
+@@ -1374,7 +1374,7 @@ void send_sigtrap(struct task_struct *ts
+ info.si_code = si_code;
+
+ /* User-mode ip? */
+- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
++ info.si_addr = user_mode(regs) ? (void __user *) regs->ip : NULL;
+
+ /* Send us the fake SIGTRAP */
+ force_sig_info(SIGTRAP, &info, tsk);
+diff -urNp linux-2.6.30.8/arch/x86/kernel/reboot.c linux-2.6.30.8/arch/x86/kernel/reboot.c
+--- linux-2.6.30.8/arch/x86/kernel/reboot.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/reboot.c 2009-07-30 09:48:09.952643339 -0400
+@@ -31,7 +31,7 @@ void (*pm_power_off)(void);
+ EXPORT_SYMBOL(pm_power_off);
+
+ static const struct desc_ptr no_idt = {};
+-static int reboot_mode;
++static unsigned short reboot_mode;
+ enum reboot_type reboot_type = BOOT_KBD;
+ int reboot_force;
+
+@@ -249,7 +249,7 @@ static struct dmi_system_id __initdata r
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
+ },
+ },
+- { }
++ { NULL, NULL, {{0, {0}}}, NULL}
+ };
+
+ static int __init reboot_init(void)
+@@ -265,12 +265,12 @@ core_initcall(reboot_init);
+ controller to pulse the CPU reset line, which is more thorough, but
+ doesn't work with at least one type of 486 motherboard. It is easy
+ to stop this code working; hence the copious comments. */
+-static const unsigned long long
+-real_mode_gdt_entries [3] =
++static struct desc_struct
++real_mode_gdt_entries [3] __read_only =
+ {
+- 0x0000000000000000ULL, /* Null descriptor */
+- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
+- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
++ {{{0x00000000, 0x00000000}}}, /* Null descriptor */
++ {{{0x0000ffff, 0x00009b00}}}, /* 16-bit real-mode 64k code at 0x00000000 */
++ {{{0x0100ffff, 0x00009300}}} /* 16-bit real-mode 64k data at 0x00000100 */
+ };
+
+ static const struct desc_ptr
+@@ -319,7 +319,7 @@ static const unsigned char jump_to_bios
+ * specified by the code and length parameters.
+  * We assume that length will always be less than 100!
+ */
+-void machine_real_restart(const unsigned char *code, int length)
++void machine_real_restart(const unsigned char *code, unsigned int length)
+ {
+ local_irq_disable();
+
+@@ -339,8 +339,8 @@ void machine_real_restart(const unsigned
+ /* Remap the kernel at virtual address zero, as well as offset zero
+ from the kernel segment. This assumes the kernel segment starts at
+ virtual address PAGE_OFFSET. */
+- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
++ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+
+ /*
+ * Use `swapper_pg_dir' as our page directory.
+@@ -352,16 +352,15 @@ void machine_real_restart(const unsigned
+ boot)". This seems like a fairly standard thing that gets set by
+ REBOOT.COM programs, and the previous reset routine did this
+ too. */
+- *((unsigned short *)0x472) = reboot_mode;
++ *(unsigned short *)(__va(0x472)) = reboot_mode;
+
+ /* For the switch to real mode, copy some code to low memory. It has
+ to be in the first 64k because it is running in 16-bit mode, and it
+ has to have the same physical and virtual address, because it turns
+ off paging. Copy it near the end of the first page, out of the way
+ of BIOS variables. */
+- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
+- real_mode_switch, sizeof (real_mode_switch));
+- memcpy((void *)(0x1000 - 100), code, length);
++ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
++ memcpy(__va(0x1000 - 100), code, length);
+
+ /* Set up the IDT for real mode. */
+ load_idt(&real_mode_idt);
+diff -urNp linux-2.6.30.8/arch/x86/kernel/setup.c linux-2.6.30.8/arch/x86/kernel/setup.c
+--- linux-2.6.30.8/arch/x86/kernel/setup.c 2009-07-30 20:32:40.383618032 -0400
++++ linux-2.6.30.8/arch/x86/kernel/setup.c 2009-07-30 20:32:47.940599318 -0400
+@@ -740,14 +740,14 @@ void __init setup_arch(char **cmdline_p)
+
+ if (!boot_params.hdr.root_flags)
+ root_mountflags &= ~MS_RDONLY;
+- init_mm.start_code = (unsigned long) _text;
+- init_mm.end_code = (unsigned long) _etext;
++ init_mm.start_code = ktla_ktva((unsigned long) _text);
++ init_mm.end_code = ktla_ktva((unsigned long) _etext);
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = _brk_end;
+
+- code_resource.start = virt_to_phys(_text);
+- code_resource.end = virt_to_phys(_etext)-1;
+- data_resource.start = virt_to_phys(_etext);
++ code_resource.start = virt_to_phys(ktla_ktva(_text));
++ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
++ data_resource.start = virt_to_phys(_data);
+ data_resource.end = virt_to_phys(_edata)-1;
+ bss_resource.start = virt_to_phys(&__bss_start);
+ bss_resource.end = virt_to_phys(&__bss_stop)-1;
+diff -urNp linux-2.6.30.8/arch/x86/kernel/setup_percpu.c linux-2.6.30.8/arch/x86/kernel/setup_percpu.c
+--- linux-2.6.30.8/arch/x86/kernel/setup_percpu.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/setup_percpu.c 2009-08-05 19:08:00.518752374 -0400
+@@ -25,19 +25,17 @@
+ # define DBG(x...)
+ #endif
+
++#ifdef CONFIG_SMP
+ DEFINE_PER_CPU(int, cpu_number);
+ EXPORT_PER_CPU_SYMBOL(cpu_number);
++#endif
+
+-#ifdef CONFIG_X86_64
+ #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
+-#else
+-#define BOOT_PERCPU_OFFSET 0
+-#endif
+
+ DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+ EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
++unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
+ [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+ };
+ EXPORT_SYMBOL(__per_cpu_offset);
+@@ -336,13 +334,15 @@ out_free_ar:
+ static inline void setup_percpu_segment(int cpu)
+ {
+ #ifdef CONFIG_X86_32
+- struct desc_struct gdt;
+-
+- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+- 0x2 | DESCTYPE_S, 0x8);
+- gdt.s = 1;
+- write_gdt_entry(get_cpu_gdt_table(cpu),
+- GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
++ struct desc_struct d, *gdt = get_cpu_gdt_table(cpu);
++ unsigned long base = per_cpu_offset(cpu);
++ const unsigned long limit = VMALLOC_END - base - 1;
++
++ if (limit < 64*1024)
++ pack_descriptor(&d, base, limit, 0x80 | DESCTYPE_S | 0x3, 0x4);
++ else
++ pack_descriptor(&d, base, limit >> PAGE_SHIFT, 0x80 | DESCTYPE_S | 0x3, 0xC);
++ write_gdt_entry(gdt, GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
+ #endif
+ }
+
+@@ -381,6 +381,11 @@ void __init setup_per_cpu_areas(void)
+ /* alrighty, percpu areas up and running */
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu) {
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++ unsigned long canary = per_cpu(stack_canary, cpu);
++#endif
++#endif
+ per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+ per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+ per_cpu(cpu_number, cpu) = cpu;
+@@ -408,6 +413,12 @@ void __init setup_per_cpu_areas(void)
+ early_per_cpu_map(x86_cpu_to_node_map, cpu);
+ #endif
+ #endif
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++ if (cpu == boot_cpu_id)
++ per_cpu(stack_canary, cpu) = canary;
++#endif
++#endif
+ /*
+ * Up to this point, the boot CPU has been using .data.init
+ * area. Reload any changed state for the boot CPU.
+diff -urNp linux-2.6.30.8/arch/x86/kernel/signal.c linux-2.6.30.8/arch/x86/kernel/signal.c
+--- linux-2.6.30.8/arch/x86/kernel/signal.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/signal.c 2009-07-30 09:48:09.958625901 -0400
+@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
+ * Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0.
+ */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ #else /* !CONFIG_X86_32 */
+ sp = round_down(sp, 16) - 8;
+ #endif
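Both the old and the new expression preserve the ABI property stated in the comment; they differ only in how much headroom they leave below the incoming sp. A stand-alone check of that arithmetic (assuming nothing beyond the two expressions shown):

#include <assert.h>
#include <stdio.h>

/* The old expression may return sp itself; the replacement always leaves
 * at least 16 bytes below the incoming sp. Both keep the i386 ABI
 * invariant ((sp + 4) & 15) == 0 at function entry. */
static unsigned long align_old(unsigned long sp) { return ((sp + 4) & -16ul) - 4; }
static unsigned long align_new(unsigned long sp) { return ((sp - 12) & -16ul) - 4; }

int main(void)
{
        for (unsigned long sp = 0x1000; sp < 0x1100; sp++) {
                unsigned long o = align_old(sp), n = align_new(sp);
                assert(((o + 4) & 15) == 0 && ((n + 4) & 15) == 0);
                assert(o <= sp);          /* old form may touch sp exactly */
                assert(n + 16 <= sp);     /* new form reserves 16+ bytes  */
        }
        puts("sigframe alignment invariants hold");
        return 0;
}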
+@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
+ }
+
+ if (current->mm->context.vdso)
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
++ restorer = (void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+ else
+- restorer = &frame->retcode;
++ restorer = (void __user *)&frame->retcode;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+
+@@ -378,7 +378,7 @@ static int __setup_rt_frame(int sig, str
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. */
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
++ restorer = (void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+ put_user_ex(restorer, &frame->pretcode);
+@@ -790,7 +790,7 @@ static void do_signal(struct pt_regs *re
+ * X86_32: vm86 regs switched out by assembly code before reaching
+ * here, so testing against kernel CS suffices.
+ */
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ return;
+
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK)
+diff -urNp linux-2.6.30.8/arch/x86/kernel/smpboot.c linux-2.6.30.8/arch/x86/kernel/smpboot.c
+--- linux-2.6.30.8/arch/x86/kernel/smpboot.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/smpboot.c 2009-07-30 09:48:09.958625901 -0400
+@@ -685,6 +685,10 @@ static int __cpuinit do_boot_cpu(int api
+ .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+ };
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ INIT_WORK(&c_idle.work, do_fork_idle);
+
+ alternatives_smp_switch(1);
+@@ -727,7 +731,17 @@ do_rest:
+ (unsigned long)task_stack_page(c_idle.idle) -
+ KERNEL_STACK_OFFSET + THREAD_SIZE;
+ #endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ initial_code = (unsigned long)start_secondary;
+ stack_start.sp = (void *) c_idle.idle->thread.sp;
+
+diff -urNp linux-2.6.30.8/arch/x86/kernel/step.c linux-2.6.30.8/arch/x86/kernel/step.c
+--- linux-2.6.30.8/arch/x86/kernel/step.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/step.c 2009-07-30 09:48:09.958625901 -0400
+@@ -23,22 +23,20 @@ unsigned long convert_ip_to_linear(struc
+ * and APM bios ones we just ignore here.
+ */
+ if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+- u32 *desc;
++ struct desc_struct *desc;
+ unsigned long base;
+
+- seg &= ~7UL;
++ seg >>= 3;
+
+ mutex_lock(&child->mm->context.lock);
+- if (unlikely((seg >> 3) >= child->mm->context.size))
+- addr = -1L; /* bogus selector, access would fault */
++ if (unlikely(seg >= child->mm->context.size))
++ addr = -EINVAL;
+ else {
+- desc = child->mm->context.ldt + seg;
+- base = ((desc[0] >> 16) |
+- ((desc[1] & 0xff) << 16) |
+- (desc[1] & 0xff000000));
++ desc = &child->mm->context.ldt[seg];
++ base = (desc->a >> 16) | ((desc->b & 0xff) << 16) | (desc->b & 0xff000000);
+
+ /* 16-bit code segment? */
+- if (!((desc[1] >> 22) & 1))
++ if (!((desc->b >> 22) & 1))
+ addr &= 0xffff;
+ addr += base;
+ }
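The rewritten base computation reassembles a segment base that the i386 descriptor format scatters across both 32-bit words. A stand-alone model of the same bit layout (hypothetical field values):

#include <stdio.h>
#include <stdint.h>

struct desc { uint32_t a, b; };   /* low/high words, as in desc_struct */

/* base[15:0] lives in a[31:16], base[23:16] in b[7:0], base[31:24] in b[31:24] */
static uint32_t desc_base(const struct desc *d)
{
        return (d->a >> 16) | ((d->b & 0xff) << 16) | (d->b & 0xff000000);
}

int main(void)
{
        /* descriptor encoding base 0x12345678, other fields zeroed */
        struct desc d = { .a = 0x56780000, .b = 0x12000034 };
        printf("%#x\n", desc_base(&d));   /* -> 0x12345678 */
        return 0;
}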
+@@ -54,6 +52,9 @@ static int is_setting_trap_flag(struct t
+ unsigned char opcode[15];
+ unsigned long addr = convert_ip_to_linear(child, regs);
+
++ if (addr == -EINVAL)
++ return 0;
++
+ copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+ for (i = 0; i < copied; i++) {
+ switch (opcode[i]) {
+@@ -75,7 +76,7 @@ static int is_setting_trap_flag(struct t
+
+ #ifdef CONFIG_X86_64
+ case 0x40 ... 0x4f:
+- if (regs->cs != __USER_CS)
++ if ((regs->cs & 0xffff) != __USER_CS)
+ /* 32-bit mode: register increment */
+ return 0;
+ /* 64-bit mode: REX prefix */
+diff -urNp linux-2.6.30.8/arch/x86/kernel/syscall_table_32.S linux-2.6.30.8/arch/x86/kernel/syscall_table_32.S
+--- linux-2.6.30.8/arch/x86/kernel/syscall_table_32.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/syscall_table_32.S 2009-07-30 09:48:09.959782846 -0400
+@@ -1,3 +1,4 @@
++.section .rodata,"a",@progbits
+ ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
+ .long sys_exit
+diff -urNp linux-2.6.30.8/arch/x86/kernel/sys_i386_32.c linux-2.6.30.8/arch/x86/kernel/sys_i386_32.c
+--- linux-2.6.30.8/arch/x86/kernel/sys_i386_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/sys_i386_32.c 2009-07-30 09:48:09.958625901 -0400
+@@ -24,6 +24,21 @@
+
+ #include <asm/syscalls.h>
+
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
++{
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size || addr > pax_task_size - len)
++ return -EINVAL;
++
++ return 0;
++}
++
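i386_mmap_check() tests addr > pax_task_size - len rather than addr + len > pax_task_size so that a huge caller-supplied len cannot wrap the sum around zero; bounding len first keeps the subtraction well-defined. A small demonstration of the difference (32-bit arithmetic forced via uint32_t):

#include <stdio.h>
#include <stdint.h>

#define TASK_SIZE 0xc0000000u

/* Naive form: addr + len can wrap past 0 and sneak under TASK_SIZE. */
static int check_naive(uint32_t addr, uint32_t len)
{
        return addr + len > TASK_SIZE ? -1 : 0;
}

/* Patch form: len is bounded first, so TASK_SIZE - len cannot underflow
 * and a wrapping request is always rejected. */
static int check_safe(uint32_t addr, uint32_t len)
{
        if (len > TASK_SIZE || addr > TASK_SIZE - len)
                return -1;
        return 0;
}

int main(void)
{
        uint32_t addr = 0xbfff0000u, len = 0x40010001u;   /* addr + len wraps to 1 */
        printf("naive: %d  safe: %d\n", check_naive(addr, len), check_safe(addr, len));
        /* prints "naive: 0  safe: -1" */
        return 0;
}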
+ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+@@ -83,6 +98,205 @@ out:
+ return err;
+ }
+
++unsigned long
++arch_get_unmapped_area(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff, unsigned long flags)
++{
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ unsigned long start_addr, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ vma = find_vma(mm, addr);
++ if (pax_task_size - len >= addr &&
++ (!vma || addr + len <= vma->vm_start))
++ return addr;
++ }
++ if (len > mm->cached_hole_size) {
++ start_addr = addr = mm->free_area_cache;
++ } else {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ }
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
++ start_addr = 0x00110000UL;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ start_addr += mm->delta_mmap & 0x03FFF000UL;
++#endif
++
++ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
++ start_addr = addr = mm->mmap_base;
++ else
++ addr = start_addr;
++ }
++#endif
++
++full_search:
++ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
++ /* At this point: (!vma || addr < vma->vm_end). */
++ if (pax_task_size - len < addr) {
++ /*
++ * Start a new search - just in case we missed
++ * some holes.
++ */
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ goto full_search;
++ }
++ return -ENOMEM;
++ }
++ if (!vma || addr + len <= vma->vm_start) {
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
++ }
++ if (addr + mm->cached_hole_size < vma->vm_start)
++ mm->cached_hole_size = vma->vm_start - addr;
++ addr = vma->vm_end;
++ if (mm->start_brk <= addr && addr < mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ goto full_search;
++ }
++ }
++}
++
++unsigned long
++arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
++ const unsigned long len, const unsigned long pgoff,
++ const unsigned long flags)
++{
++ struct vm_area_struct *vma;
++ struct mm_struct *mm = current->mm;
++ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ /* requested length too big for entire address space */
++ if (len > pax_task_size)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
++ goto bottomup;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++ /* requesting a specific address */
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ vma = find_vma(mm, addr);
++ if (pax_task_size - len >= addr &&
++ (!vma || addr + len <= vma->vm_start))
++ return addr;
++ }
++
++ /* check if free_area_cache is useful for us */
++ if (len <= mm->cached_hole_size) {
++ mm->cached_hole_size = 0;
++ mm->free_area_cache = mm->mmap_base;
++ }
++
++ /* either no address requested or can't fit in requested address hole */
++ addr = mm->free_area_cache;
++
++ /* make sure it can fit in the remaining address space */
++ if (addr > len) {
++ vma = find_vma(mm, addr-len);
++ if (!vma || addr <= vma->vm_start)
++ /* remember the address as a hint for next time */
++ return (mm->free_area_cache = addr-len);
++ }
++
++ if (mm->mmap_base < len)
++ goto bottomup;
++
++ addr = mm->mmap_base-len;
++
++ do {
++ /*
++ * Lookup failure means no vma is above this address,
++ * else if new region fits below vma->vm_start,
++ * return with success:
++ */
++ vma = find_vma(mm, addr);
++ if (!vma || addr+len <= vma->vm_start)
++ /* remember the address as a hint for next time */
++ return (mm->free_area_cache = addr);
++
++ /* remember the largest hole we saw so far */
++ if (addr + mm->cached_hole_size < vma->vm_start)
++ mm->cached_hole_size = vma->vm_start - addr;
++
++ /* try just below the current vma->vm_start */
++ addr = vma->vm_start-len;
++ } while (len < vma->vm_start);
++
++bottomup:
++ /*
++ * A failed mmap() very likely causes application failure,
++ * so fall back to the bottom-up function here. This scenario
++ * can happen with large stack limits and large mmap()
++ * allocations.
++ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
++ mm->cached_hole_size = ~0UL;
++ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
++ /*
++ * Restore the topdown base:
++ */
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
++ mm->cached_hole_size = ~0UL;
++
++ return addr;
++}
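A rough user-space model of the top-down walk this function performs, with a flat array standing in for the mm's vma list (the kernel's find_vma() answers the same query over its own structures):

#include <stdio.h>

struct vma { unsigned long start, end; };   /* sorted, non-overlapping */

/* Lowest vma with end > addr, or NULL -- what find_vma() returns. */
static struct vma *find_vma(struct vma *v, int n, unsigned long addr)
{
        for (int i = 0; i < n; i++)
                if (v[i].end > addr)
                        return &v[i];
        return NULL;
}

static unsigned long topdown(struct vma *v, int n, unsigned long mmap_base,
                             unsigned long len)
{
        unsigned long addr = mmap_base - len;
        struct vma *vma;

        do {
                vma = find_vma(v, n, addr);
                if (!vma || addr + len <= vma->start)
                        return addr;              /* hole found below next vma */
                addr = vma->start - len;          /* retry just under that vma */
        } while (vma->start > len);               /* stop before addr underflows */
        return 0;                                 /* caller falls back bottom-up */
}

int main(void)
{
        struct vma v[] = { { 0xb7500000UL, 0xb7600000UL },
                           { 0xb7f00000UL, 0xb8000000UL } };
        printf("%#lx\n", topdown(v, 2, 0xb8000000UL, 0x100000UL)); /* 0xb7e00000 */
        return 0;
}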
+
+ struct sel_arg_struct {
+ unsigned long n;
+diff -urNp linux-2.6.30.8/arch/x86/kernel/sys_x86_64.c linux-2.6.30.8/arch/x86/kernel/sys_x86_64.c
+--- linux-2.6.30.8/arch/x86/kernel/sys_x86_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/sys_x86_64.c 2009-07-30 09:48:09.959782846 -0400
+@@ -47,8 +47,8 @@ out:
+ return error;
+ }
+
+-static void find_start_end(unsigned long flags, unsigned long *begin,
+- unsigned long *end)
++static void find_start_end(struct mm_struct *mm, unsigned long flags,
++ unsigned long *begin, unsigned long *end)
+ {
+ if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
+ unsigned long new_begin;
+@@ -67,7 +67,7 @@ static void find_start_end(unsigned long
+ *begin = new_begin;
+ }
+ } else {
+- *begin = TASK_UNMAPPED_BASE;
++ *begin = mm->mmap_base;
+ *end = TASK_SIZE;
+ }
+ }
+@@ -84,11 +84,15 @@ arch_get_unmapped_area(struct file *filp
+ if (flags & MAP_FIXED)
+ return addr;
+
+- find_start_end(flags, &begin, &end);
++ find_start_end(mm, flags, &begin, &end);
+
+ if (len > end)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+@@ -143,7 +147,7 @@ arch_get_unmapped_area_topdown(struct fi
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+- unsigned long addr = addr0;
++ unsigned long base = mm->mmap_base, addr = addr0;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+@@ -156,6 +160,10 @@ arch_get_unmapped_area_topdown(struct fi
+ if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
+ goto bottomup;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+@@ -213,13 +221,21 @@ bottomup:
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+- mm->free_area_cache = mm->mmap_base;
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
+diff -urNp linux-2.6.30.8/arch/x86/kernel/time_32.c linux-2.6.30.8/arch/x86/kernel/time_32.c
+--- linux-2.6.30.8/arch/x86/kernel/time_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/time_32.c 2009-07-30 09:48:09.959782846 -0400
+@@ -47,22 +47,32 @@ unsigned long profile_pc(struct pt_regs
+ unsigned long pc = instruction_pointer(regs);
+
+ #ifdef CONFIG_SMP
+- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ if (!user_mode(regs) && in_lock_functions(pc)) {
+ #ifdef CONFIG_FRAME_POINTER
+- return *(unsigned long *)(regs->bp + sizeof(long));
++ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
+ #else
+ unsigned long *sp = (unsigned long *)&regs->sp;
+
+ /* Return address is either directly at stack pointer
+ or above a saved flags. Eflags has bits 22-31 zero,
+ kernel addresses don't. */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return ktla_ktva(sp[0]);
++#else
+ if (sp[0] >> 22)
+ return sp[0];
+ if (sp[1] >> 22)
+ return sp[1];
+ #endif
++
++#endif
+ }
+ #endif
++
++ if (!user_mode(regs))
++ pc = ktla_ktva(pc);
++
+ return pc;
+ }
+ EXPORT_SYMBOL(profile_pc);
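The sp[0] >> 22 test relies on saved EFLAGS having bits 22-31 architecturally clear, while any i386 kernel-text address has its high bits set; a single shift therefore tells a return address from saved flags. Illustrated stand-alone (example values only):

#include <stdio.h>

/* On i386, EFLAGS bits 22..31 are reserved-zero, whereas kernel text
 * sits at or above PAGE_OFFSET (0xc0000000 in a default config), so any
 * kernel address survives a 22-bit right shift with something non-zero. */
static int looks_like_kernel_address(unsigned long word)
{
        return (word >> 22) != 0;
}

int main(void)
{
        printf("%d\n", looks_like_kernel_address(0x00000246UL)); /* EFLAGS -> 0 */
        printf("%d\n", looks_like_kernel_address(0xc104f3a0UL)); /* text   -> 1 */
        return 0;
}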
+diff -urNp linux-2.6.30.8/arch/x86/kernel/time_64.c linux-2.6.30.8/arch/x86/kernel/time_64.c
+--- linux-2.6.30.8/arch/x86/kernel/time_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/time_64.c 2009-07-30 09:48:09.960740129 -0400
+@@ -25,8 +25,6 @@
+ #include <asm/time.h>
+ #include <asm/timer.h>
+
+-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+-
+ unsigned long profile_pc(struct pt_regs *regs)
+ {
+ unsigned long pc = instruction_pointer(regs);
+@@ -34,7 +32,7 @@ unsigned long profile_pc(struct pt_regs
+ /* Assume the lock function has either no stack frame or a copy
+ of flags from PUSHF
+ Eflags always has bits 22 and up cleared unlike kernel addresses. */
+- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ if (!user_mode(regs) && in_lock_functions(pc)) {
+ #ifdef CONFIG_FRAME_POINTER
+ return *(unsigned long *)(regs->bp + sizeof(long));
+ #else
+diff -urNp linux-2.6.30.8/arch/x86/kernel/tls.c linux-2.6.30.8/arch/x86/kernel/tls.c
+--- linux-2.6.30.8/arch/x86/kernel/tls.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/tls.c 2009-07-30 09:48:09.960740129 -0400
+@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
++ return -EINVAL;
++#endif
++
+ set_tls_desc(p, idx, &info, 1);
+
+ return 0;
+diff -urNp linux-2.6.30.8/arch/x86/kernel/traps.c linux-2.6.30.8/arch/x86/kernel/traps.c
+--- linux-2.6.30.8/arch/x86/kernel/traps.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/traps.c 2009-07-30 09:48:09.961532028 -0400
+@@ -70,14 +70,6 @@ asmlinkage int system_call(void);
+
+ /* Do we ignore FPU interrupts ? */
+ char ignore_fpu_irq;
+-
+-/*
+- * The IDT has to be page-aligned to simplify the Pentium
+- * F0 0F bug workaround.. We have a special link segment
+- * for this.
+- */
+-gate_desc idt_table[256]
+- __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
+ #endif
+
+ DECLARE_BITMAP(used_vectors, NR_VECTORS);
+@@ -115,7 +107,7 @@ static inline void preempt_conditional_c
+ static inline void
+ die_if_kernel(const char *str, struct pt_regs *regs, long err)
+ {
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ die(str, regs, err);
+ }
+ #endif
+@@ -127,7 +119,7 @@ do_trap(int trapnr, int signr, char *str
+ struct task_struct *tsk = current;
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK) {
++ if (v8086_mode(regs)) {
+ /*
+ * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
+ * On nmi (interrupt 2), do_trap should not be called.
+@@ -138,7 +130,7 @@ do_trap(int trapnr, int signr, char *str
+ }
+ #endif
+
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto kernel_trap;
+
+ #ifdef CONFIG_X86_32
+@@ -161,7 +153,7 @@ trap_signal:
+ printk_ratelimit()) {
+ printk(KERN_INFO
+ "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
+- tsk->comm, tsk->pid, str,
++ tsk->comm, task_pid_nr(tsk), str,
+ regs->ip, regs->sp, error_code);
+ print_vma_addr(" in ", regs->ip);
+ printk("\n");
+@@ -180,6 +172,12 @@ kernel_trap:
+ tsk->thread.trap_no = trapnr;
+ die(str, regs, error_code);
+ }
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (trapnr == 4)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ return;
+
+ #ifdef CONFIG_X86_32
+@@ -268,14 +266,30 @@ do_general_protection(struct pt_regs *re
+ conditional_sti(regs);
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK)
++ if (v8086_mode(regs))
+ goto gp_in_vm86;
+ #endif
+
+ tsk = current;
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto gp_in_kernel;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
++ struct mm_struct *mm = tsk->mm;
++ unsigned long limit;
++
++ down_write(&mm->mmap_sem);
++ limit = mm->context.user_cs_limit;
++ if (limit < TASK_SIZE) {
++ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
++ up_write(&mm->mmap_sem);
++ return;
++ }
++ up_write(&mm->mmap_sem);
++ }
++#endif
++
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 13;
+
+@@ -308,6 +322,13 @@ gp_in_kernel:
+ if (notify_die(DIE_GPF, "general protection fault", regs,
+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
+ return;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if ((regs->cs & 0xFFFF) == __KERNEL_CS)
++ die("PAX: suspicious general protection fault", regs, error_code);
++ else
++#endif
++
+ die("general protection fault", regs, error_code);
+ }
+
+@@ -554,7 +575,7 @@ dotraplinkage void __kprobes do_debug(st
+ }
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK)
++ if (v8086_mode(regs))
+ goto debug_vm86;
+ #endif
+
+@@ -566,7 +587,7 @@ dotraplinkage void __kprobes do_debug(st
+ * kernel space (but re-enable TF when returning to user mode).
+ */
+ if (condition & DR_STEP) {
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto clear_TF_reenable;
+ }
+
+@@ -753,7 +774,7 @@ do_simd_coprocessor_error(struct pt_regs
+ * Handle strange cache flush from user space exception
+ * in all other cases. This is undocumented behaviour.
+ */
+- if (regs->flags & X86_VM_MASK) {
++ if (v8086_mode(regs)) {
+ handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
+ return;
+ }
+@@ -782,19 +803,14 @@ do_spurious_interrupt_bug(struct pt_regs
+ #ifdef CONFIG_X86_32
+ unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
+ {
+- struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
+ unsigned long base = (kesp - uesp) & -THREAD_SIZE;
+ unsigned long new_kesp = kesp - base;
+ unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
+- __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
++ struct desc_struct ss;
+
+ /* Set up base for espfix segment */
+- desc &= 0x00f0ff0000000000ULL;
+- desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
+- ((((__u64)base) << 32) & 0xff00000000000000ULL) |
+- ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
+- (lim_pages & 0xffff);
+- *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
++ pack_descriptor(&ss, base, lim_pages, 0x93, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(smp_processor_id()), GDT_ENTRY_ESPFIX_SS, &ss, DESCTYPE_S);
+
+ return new_kesp;
+ }
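pack_descriptor() replaces the open-coded masking above; the underlying encoding is the usual i386 descriptor layout. A stand-alone sketch of that packing (hypothetical helper and values; type 0x93 and flags 0xC as used in the hunk):

#include <stdio.h>
#include <stdint.h>

/* Encode base/limit/type/flags as an i386 segment descriptor lays them
 * out: limit[15:0], base[23:0], access byte, limit[19:16], flags nibble,
 * base[31:24]. */
static uint64_t pack_desc(uint32_t base, uint32_t limit,
                          unsigned type, unsigned flags)
{
        uint64_t d = 0;
        d |= limit & 0xffff;                       /* limit 15..0  -> bits 0..15  */
        d |= (uint64_t)(base & 0xffffff) << 16;    /* base 23..0   -> bits 16..39 */
        d |= (uint64_t)(type & 0xff) << 40;        /* access byte  -> bits 40..47 */
        d |= (uint64_t)(limit & 0xf0000) << 32;    /* limit 19..16 -> bits 48..51 */
        d |= (uint64_t)(flags & 0xf) << 52;        /* flags nibble -> bits 52..55 */
        d |= (uint64_t)(base & 0xff000000) << 32;  /* base 31..24  -> bits 56..63 */
        return d;
}

int main(void)
{
        /* 0x93 = present, DPL0, writable data; 0xC = 4 KB granular, 32-bit */
        printf("%#018llx\n",
               (unsigned long long)pack_desc(0x12345678, 0xABCDE, 0x93, 0xC));
        /* prints 0x12ca93345678bcde */
        return 0;
}

The kernel helper additionally splits the result into desc_struct's two 32-bit words, but the bit positions are the same.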
+diff -urNp linux-2.6.30.8/arch/x86/kernel/tsc.c linux-2.6.30.8/arch/x86/kernel/tsc.c
+--- linux-2.6.30.8/arch/x86/kernel/tsc.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/tsc.c 2009-07-30 09:48:09.961532028 -0400
+@@ -772,7 +772,7 @@ static struct dmi_system_id __initdata b
+ DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
+ },
+ },
+- {}
++ { NULL, NULL, {{0, {0}}}, NULL}
+ };
+
+ static void __init check_system_tsc_reliable(void)
+diff -urNp linux-2.6.30.8/arch/x86/kernel/vm86_32.c linux-2.6.30.8/arch/x86/kernel/vm86_32.c
+--- linux-2.6.30.8/arch/x86/kernel/vm86_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/vm86_32.c 2009-07-30 09:48:09.961532028 -0400
+@@ -148,7 +148,7 @@ struct pt_regs *save_v86_state(struct ke
+ do_exit(SIGSEGV);
+ }
+
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+ current->thread.sp0 = current->thread.saved_sp0;
+ current->thread.sysenter_cs = __KERNEL_CS;
+ load_sp0(tss, &current->thread);
+@@ -324,7 +324,7 @@ static void do_sys_vm86(struct kernel_vm
+ tsk->thread.saved_fs = info->regs32->fs;
+ tsk->thread.saved_gs = get_user_gs(info->regs32);
+
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+ tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ if (cpu_has_sep)
+ tsk->thread.sysenter_cs = 0;
+diff -urNp linux-2.6.30.8/arch/x86/kernel/vmi_32.c linux-2.6.30.8/arch/x86/kernel/vmi_32.c
+--- linux-2.6.30.8/arch/x86/kernel/vmi_32.c 2009-08-24 20:46:56.225563774 -0400
++++ linux-2.6.30.8/arch/x86/kernel/vmi_32.c 2009-08-12 21:15:21.104308164 -0400
+@@ -102,18 +102,43 @@ static unsigned patch_internal(int call,
+ {
+ u64 reloc;
+ struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
+ switch(rel->type) {
+ case VMI_RELOCATION_CALL_REL:
+ BUG_ON(len < 5);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ *(char *)insnbuf = MNEM_CALL;
+ patch_offset(insnbuf, ip, (unsigned long)rel->eip);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ return 5;
+
+ case VMI_RELOCATION_JUMP_REL:
+ BUG_ON(len < 5);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ *(char *)insnbuf = MNEM_JMP;
+ patch_offset(insnbuf, ip, (unsigned long)rel->eip);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ return 5;
+
+ case VMI_RELOCATION_NOP:
+@@ -404,13 +429,13 @@ static void vmi_set_pud(pud_t *pudp, pud
+
+ static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+- const pte_t pte = { .pte = 0 };
++ const pte_t pte = __pte(0ULL);
+ vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
+ }
+
+ static void vmi_pmd_clear(pmd_t *pmd)
+ {
+- const pte_t pte = { .pte = 0 };
++ const pte_t pte = __pte(0ULL);
+ vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
+ }
+ #endif
+@@ -438,8 +463,8 @@ vmi_startup_ipi_hook(int phys_apicid, un
+ ap.ss = __KERNEL_DS;
+ ap.esp = (unsigned long) start_esp;
+
+- ap.ds = __USER_DS;
+- ap.es = __USER_DS;
++ ap.ds = __KERNEL_DS;
++ ap.es = __KERNEL_DS;
+ ap.fs = __KERNEL_PERCPU;
+ ap.gs = __KERNEL_STACK_CANARY;
+
+@@ -634,12 +659,20 @@ static inline int __init activate_vmi(vo
+ u64 reloc;
+ const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ if (call_vrom_func(vmi_rom, vmi_init) != 0) {
+ printk(KERN_ERR "VMI ROM failed to initialize!");
+ return 0;
+ }
+ savesegment(cs, kernel_cs);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ pv_info.paravirt_enabled = 1;
+ pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
+ pv_info.name = "vmi";
+@@ -830,6 +863,10 @@ static inline int __init activate_vmi(vo
+
+ para_fill(pv_irq_ops.safe_halt, Halt);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ /*
+ * Alternative instruction rewriting doesn't happen soon enough
+ * to convert VMI_IRET to a call instead of a jump; so we have
+diff -urNp linux-2.6.30.8/arch/x86/kernel/vmlinux_32.lds.S linux-2.6.30.8/arch/x86/kernel/vmlinux_32.lds.S
+--- linux-2.6.30.8/arch/x86/kernel/vmlinux_32.lds.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/vmlinux_32.lds.S 2009-08-27 21:11:25.322527521 -0400
+@@ -15,6 +15,20 @@
+ #include <asm/page_types.h>
+ #include <asm/cache.h>
+ #include <asm/boot.h>
++#include <asm/segment.h>
++
++#ifdef CONFIG_X86_PAE
++#define PMD_SHIFT 21
++#else
++#define PMD_SHIFT 22
++#endif
++#define PMD_SIZE (1 << PMD_SHIFT)
++
++#ifdef CONFIG_PAX_KERNEXEC
++#define __KERNEL_TEXT_OFFSET (__PAGE_OFFSET + (((____LOAD_PHYSICAL_ADDR + 2*(PMD_SIZE - 1)) - 1) & ~(PMD_SIZE - 1)))
++#else
++#define __KERNEL_TEXT_OFFSET 0
++#endif
+
+ OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+ OUTPUT_ARCH(i386)
+@@ -22,82 +36,23 @@ ENTRY(phys_startup_32)
+ jiffies = jiffies_64;
+
+ PHDRS {
+- text PT_LOAD FLAGS(5); /* R_E */
+- data PT_LOAD FLAGS(7); /* RWE */
+- note PT_NOTE FLAGS(0); /* ___ */
++ initdata PT_LOAD FLAGS(6); /* RW_ */
++ percpu PT_LOAD FLAGS(6); /* RW_ */
++ inittext PT_LOAD FLAGS(5); /* R_E */
++ text PT_LOAD FLAGS(5); /* R_E */
++ rodata PT_LOAD FLAGS(4); /* R__ */
++ data PT_LOAD FLAGS(6); /* RW_ */
++ note PT_NOTE FLAGS(0); /* ___ */
+ }
+ SECTIONS
+ {
+- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
+- phys_startup_32 = startup_32 - LOAD_OFFSET;
++ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
+
+- .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
+- _text = .; /* Text and read-only data */
+- *(.text.head)
+- } :text = 0x9090
+-
+- /* read-only */
+- .text : AT(ADDR(.text) - LOAD_OFFSET) {
+- . = ALIGN(PAGE_SIZE); /* not really needed, already page aligned */
+- *(.text.page_aligned)
+- TEXT_TEXT
+- SCHED_TEXT
+- LOCK_TEXT
+- KPROBES_TEXT
+- IRQENTRY_TEXT
+- *(.fixup)
+- *(.gnu.warning)
+- _etext = .; /* End of text section */
+- } :text = 0x9090
+-
+- NOTES :text :note
+-
+- . = ALIGN(16); /* Exception table */
+- __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+- __start___ex_table = .;
+- *(__ex_table)
+- __stop___ex_table = .;
+- } :text = 0x9090
+-
+- RODATA
+-
+- /* writeable */
+- . = ALIGN(PAGE_SIZE);
+- .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */
+- DATA_DATA
+- CONSTRUCTORS
+- } :data
+-
+- . = ALIGN(PAGE_SIZE);
+- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+- __nosave_begin = .;
+- *(.data.nosave)
+- . = ALIGN(PAGE_SIZE);
+- __nosave_end = .;
+- }
+-
+- . = ALIGN(PAGE_SIZE);
+- .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
+- *(.data.page_aligned)
+- *(.data.idt)
+- }
+-
+- . = ALIGN(32);
+- .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+- *(.data.cacheline_aligned)
+- }
+-
+- /* rarely changed data like cpu maps */
+- . = ALIGN(32);
+- .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
+- *(.data.read_mostly)
+- _edata = .; /* End of data section */
+- }
+-
+- . = ALIGN(THREAD_SIZE); /* init_task */
+- .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
+- *(.data.init_task)
+- }
++ .text.startup : AT(ADDR(.text.startup) - LOAD_OFFSET) {
++ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET;
++ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++ *(.text.startup)
++ } :initdata
+
+ /* might get freed after init */
+ . = ALIGN(PAGE_SIZE);
+@@ -115,14 +70,8 @@ SECTIONS
+ . = ALIGN(PAGE_SIZE);
+
+ /* will be freed after init */
+- . = ALIGN(PAGE_SIZE); /* Init code and data */
+- .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+- __init_begin = .;
+- _sinittext = .;
+- INIT_TEXT
+- _einittext = .;
+- }
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
++ __init_begin = .;
+ INIT_DATA
+ }
+ . = ALIGN(16);
+@@ -162,11 +111,6 @@ SECTIONS
+ *(.parainstructions)
+ __parainstructions_end = .;
+ }
+- /* .exit.text is discard at runtime, not link time, to deal with references
+- from .altinstructions and .eh_frame */
+- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
+- EXIT_TEXT
+- }
+ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
+ EXIT_DATA
+ }
+@@ -178,12 +122,130 @@ SECTIONS
+ __initramfs_end = .;
+ }
+ #endif
+- PERCPU(PAGE_SIZE)
+ . = ALIGN(PAGE_SIZE);
++ PERCPU_VADDR(0, :percpu)
++ . = ALIGN(PAGE_SIZE);
++ /* freed after init ends here */
++
++ . = ALIGN(PAGE_SIZE); /* Init code and data */
++ .init.text (. - __KERNEL_TEXT_OFFSET) : AT(ADDR(.init.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
++ _sinittext = .;
++ INIT_TEXT
++ _einittext = .;
++ } :inittext
++
++ /* .exit.text is discarded at runtime, not link time, to deal with references
++ from .altinstructions and .eh_frame */
++ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
++ EXIT_TEXT
++ }
++
++ .filler : AT(ADDR(.filler) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
++ BYTE(0)
++ . = ALIGN(2*PMD_SIZE) - 1;
++ }
++
+ /* freed after init ends here */
+
++ .text.head : AT(ADDR(.text.head) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
++ __init_end = . + __KERNEL_TEXT_OFFSET;
++ KERNEL_TEXT_OFFSET = . + __KERNEL_TEXT_OFFSET;
++ _text = .; /* Text and read-only data */
++ *(.text.head)
++ } :text = 0x9090
++
++ /* read-only */
++ .text : AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
++ . = ALIGN(PAGE_SIZE); /* not really needed, already page aligned */
++ *(.text.page_aligned)
++ TEXT_TEXT
++ SCHED_TEXT
++ LOCK_TEXT
++ KPROBES_TEXT
++ IRQENTRY_TEXT
++ *(.fixup)
++ *(.gnu.warning)
++ _etext = .; /* End of text section */
++ } :text = 0x9090
++
++ . += __KERNEL_TEXT_OFFSET;
++
++ . = ALIGN(4096);
++ NOTES :rodata :note
++
++ . = ALIGN(16); /* Exception table */
++ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
++ __start___ex_table = .;
++ *(__ex_table)
++ __stop___ex_table = .;
++ } :rodata
++
++ RO_DATA(PAGE_SIZE)
++
++ . = ALIGN(PAGE_SIZE);
++ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
++ *(.idt)
++ . = ALIGN(PAGE_SIZE);
++ *(.empty_zero_page)
++ *(.swapper_pg_pmd)
++ *(.swapper_pg_dir)
++
++#if defined(CONFIG_PAX_KERNEXEC) && !defined(CONFIG_MODULES)
++ . = ALIGN(PMD_SIZE);
++#endif
++
++ }
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
++ . = ALIGN(PAGE_SIZE);
++ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
++ MODULES_VADDR = .;
++ BYTE(0)
++ . += (8 * 1024 * 1024);
++ . = ALIGN(PMD_SIZE);
++ MODULES_END = . - 1;
++ }
++#endif
++
++ /* writeable */
++ . = ALIGN(PAGE_SIZE);
++ .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */
++ _data = .;
++ DATA_DATA
++ CONSTRUCTORS
++ } :data
++
++ . = ALIGN(PAGE_SIZE);
++ .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
++ __nosave_begin = .;
++ *(.data.nosave)
++ . = ALIGN(PAGE_SIZE);
++ __nosave_end = .;
++ }
++
++ . = ALIGN(PAGE_SIZE);
++ .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
++ *(.data.page_aligned)
++ }
++
++ . = ALIGN(32);
++ .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
++ *(.data.cacheline_aligned)
++ }
++
++ /* rarely changed data like cpu maps */
++ . = ALIGN(32);
++ .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
++ *(.data.read_mostly)
++ _edata = .; /* End of data section */
++ }
++
++ . = ALIGN(THREAD_SIZE); /* init_task */
++ .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
++ *(.data.init_task)
++ }
++
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+- __init_end = .;
+ __bss_start = .; /* BSS */
+ *(.bss.page_aligned)
+ *(.bss)
+diff -urNp linux-2.6.30.8/arch/x86/kernel/vmlinux_64.lds.S linux-2.6.30.8/arch/x86/kernel/vmlinux_64.lds.S
+--- linux-2.6.30.8/arch/x86/kernel/vmlinux_64.lds.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/vmlinux_64.lds.S 2009-08-01 08:46:06.438873305 -0400
+@@ -13,11 +13,11 @@
+ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
+ OUTPUT_ARCH(i386:x86-64)
+ ENTRY(phys_startup_64)
+-jiffies_64 = jiffies;
++jiffies = jiffies_64;
+ PHDRS {
+ text PT_LOAD FLAGS(5); /* R_E */
+- data PT_LOAD FLAGS(7); /* RWE */
+- user PT_LOAD FLAGS(7); /* RWE */
++ data PT_LOAD FLAGS(6); /* RW_ */
++ user PT_LOAD FLAGS(5); /* R_E */
+ data.init PT_LOAD FLAGS(7); /* RWE */
+ #ifdef CONFIG_SMP
+ percpu PT_LOAD FLAGS(7); /* RWE */
+@@ -54,14 +54,18 @@ SECTIONS
+ __stop___ex_table = .;
+ } :text = 0x9090
+
+- RODATA
++ RO_DATA(PAGE_SIZE)
+
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(2*1024*1024); /* Align data segment to PMD size boundary */
++#else
+ . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */
++#endif
+ /* Data */
+ .data : AT(ADDR(.data) - LOAD_OFFSET) {
++ _data = .;
+ DATA_DATA
+ CONSTRUCTORS
+- _edata = .; /* End of data section */
+ } :data
+
+
+@@ -75,9 +79,28 @@ SECTIONS
+ *(.data.read_mostly)
+ }
+
++ .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
++ . = ALIGN(THREAD_SIZE); /* init_task */
++ *(.data.init_task)
++ }
++
++ .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
++ . = ALIGN(PAGE_SIZE);
++ *(.data.page_aligned)
++ }
++
++ .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
++ . = ALIGN(PAGE_SIZE);
++ __nosave_begin = .;
++ *(.data.nosave)
++ . = ALIGN(PAGE_SIZE);
++ __nosave_end = .;
++ _edata = .; /* End of data section */
++ }
++
+ #define VSYSCALL_ADDR (-10*1024*1024)
+-#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
+-#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
++#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data_nosave) + SIZEOF(.data_nosave) + 4095) & ~(4095))
++#define VSYSCALL_VIRT_ADDR ((ADDR(.data_nosave) + SIZEOF(.data_nosave) + 4095) & ~(4095))
+
+ #define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
+ #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
+@@ -108,10 +131,6 @@ SECTIONS
+ .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { *(.vgetcpu_mode) }
+ vgetcpu_mode = VVIRT(.vgetcpu_mode);
+
+- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
+- .jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) }
+- jiffies = VVIRT(.jiffies);
+-
+ .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3))
+ { *(.vsyscall_3) }
+
+@@ -125,16 +144,6 @@ SECTIONS
+ #undef VVIRT_OFFSET
+ #undef VVIRT
+
+- .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
+- . = ALIGN(THREAD_SIZE); /* init_task */
+- *(.data.init_task)
+- }:data.init
+-
+- .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
+- . = ALIGN(PAGE_SIZE);
+- *(.data.page_aligned)
+- }
+-
+ .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+ /* might get freed after init */
+ . = ALIGN(PAGE_SIZE);
+@@ -144,7 +153,7 @@ SECTIONS
+ __smp_locks_end = .;
+ . = ALIGN(PAGE_SIZE);
+ __smp_alt_end = .;
+- }
++ }:data.init
+
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
+ __init_begin = .; /* paired with __init_end */
+@@ -233,27 +242,20 @@ SECTIONS
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+
+- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+- . = ALIGN(PAGE_SIZE);
+- __nosave_begin = .;
+- *(.data.nosave)
+- . = ALIGN(PAGE_SIZE);
+- __nosave_end = .;
+- } :data.init2 /* use another section data.init2, see PERCPU_VADDR() above */
+-
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+ . = ALIGN(PAGE_SIZE);
+ __bss_start = .; /* BSS */
+ *(.bss.page_aligned)
+ *(.bss)
+ __bss_stop = .;
+- }
++ } :data.init2
+
+ .brk : AT(ADDR(.brk) - LOAD_OFFSET) {
+ . = ALIGN(PAGE_SIZE);
+ __brk_base = . ;
+ . += 64 * 1024 ; /* 64k alignment slop space */
+ *(.brk_reservation) /* areas brk users have reserved */
++ . = ALIGN(2*1024*1024);
+ __brk_limit = . ;
+ }
+
+@@ -276,7 +278,6 @@ SECTIONS
+ * for the boot processor.
+ */
+ #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
+-INIT_PER_CPU(gdt_page);
+ INIT_PER_CPU(irq_stack_union);
+
+ /*
+diff -urNp linux-2.6.30.8/arch/x86/kernel/vsyscall_64.c linux-2.6.30.8/arch/x86/kernel/vsyscall_64.c
+--- linux-2.6.30.8/arch/x86/kernel/vsyscall_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/vsyscall_64.c 2009-07-30 09:48:09.963690654 -0400
+@@ -79,6 +79,7 @@ void update_vsyscall(struct timespec *wa
+
+ write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ /* copy vsyscall data */
++ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
+ vsyscall_gtod_data.clock.vread = clock->vread;
+ vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
+ vsyscall_gtod_data.clock.mask = clock->mask;
+@@ -201,7 +202,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
+ We do this here because otherwise user space would do it on
+ its own in a likely inferior way (no access to jiffies).
+ If you don't like it pass NULL. */
+- if (tcache && tcache->blob[0] == (j = __jiffies)) {
++ if (tcache && tcache->blob[0] == (j = jiffies)) {
+ p = tcache->blob[1];
+ } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
+ /* Load per CPU data from RDTSCP */
+@@ -240,13 +241,13 @@ static ctl_table kernel_table2[] = {
+ .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = vsyscall_sysctl_change },
+- {}
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+
+ static ctl_table kernel_root_table2[] = {
+ { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
+ .child = kernel_table2 },
+- {}
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+ #endif
+
+diff -urNp linux-2.6.30.8/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.30.8/arch/x86/kernel/x8664_ksyms_64.c
+--- linux-2.6.30.8/arch/x86/kernel/x8664_ksyms_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/kernel/x8664_ksyms_64.c 2009-07-30 09:48:09.963690654 -0400
+@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
+
+ EXPORT_SYMBOL(copy_user_generic);
+ EXPORT_SYMBOL(__copy_user_nocache);
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(__copy_from_user_inatomic);
+
+ EXPORT_SYMBOL(copy_page);
+diff -urNp linux-2.6.30.8/arch/x86/kvm/svm.c linux-2.6.30.8/arch/x86/kvm/svm.c
+--- linux-2.6.30.8/arch/x86/kvm/svm.c 2009-09-09 17:37:32.659510414 -0400
++++ linux-2.6.30.8/arch/x86/kvm/svm.c 2009-09-09 17:37:49.750113159 -0400
+@@ -2247,7 +2247,19 @@ static void reload_tss(struct kvm_vcpu *
+ int cpu = raw_smp_processor_id();
+
+ struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++
++ pax_open_kernel(cr0);
++#endif
++
+ svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ load_TR_desc();
+ }
+
+@@ -2643,7 +2655,7 @@ static int svm_get_mt_mask_shift(void)
+ return 0;
+ }
+
+-static struct kvm_x86_ops svm_x86_ops = {
++static const struct kvm_x86_ops svm_x86_ops = {
+ .cpu_has_kvm_support = has_svm,
+ .disabled_by_bios = is_disabled,
+ .hardware_setup = svm_hardware_setup,
+diff -urNp linux-2.6.30.8/arch/x86/kvm/vmx.c linux-2.6.30.8/arch/x86/kvm/vmx.c
+--- linux-2.6.30.8/arch/x86/kvm/vmx.c 2009-09-09 17:37:32.662249874 -0400
++++ linux-2.6.30.8/arch/x86/kvm/vmx.c 2009-09-09 17:37:50.180132423 -0400
+@@ -506,9 +506,23 @@ static void reload_tss(void)
+ struct descriptor_table gdt;
+ struct desc_struct *descs;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ kvm_get_gdt(&gdt);
+ descs = (void *)gdt.base;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ load_TR_desc();
+ }
+
+@@ -2214,7 +2228,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
+ vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
+
+ asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
+- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
++ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+@@ -3516,6 +3530,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
+ "jmp .Lkvm_vmx_return \n\t"
+ ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
+ ".Lkvm_vmx_return: "
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
++ ".Lkvm_vmx_return2: "
++#endif
++
+ /* Save guest registers, load host registers, keep flags */
+ "xchg %0, (%%"R"sp) \n\t"
+ "mov %%"R"ax, %c[rax](%0) \n\t"
+@@ -3562,6 +3582,11 @@ static void vmx_vcpu_run(struct kvm_vcpu
+ [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
+ #endif
+ [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ ,[cs]"i"(__KERNEL_CS)
++#endif
++
+ : "cc", "memory"
+ , R"bx", R"di", R"si"
+ #ifdef CONFIG_X86_64
+@@ -3580,7 +3605,7 @@ static void vmx_vcpu_run(struct kvm_vcpu
+
+ vmx_update_window_states(vcpu);
+
+- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
++ asm("mov %0, %%ds; mov %0, %%es" : : "r"(__KERNEL_DS));
+ vmx->launched = 1;
+
+ intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+@@ -3716,7 +3741,7 @@ static int vmx_get_mt_mask_shift(void)
+ return VMX_EPT_MT_EPTE_SHIFT;
+ }
+
+-static struct kvm_x86_ops vmx_x86_ops = {
++static const struct kvm_x86_ops vmx_x86_ops = {
+ .cpu_has_kvm_support = cpu_has_kvm_support,
+ .disabled_by_bios = vmx_disabled_by_bios,
+ .hardware_setup = hardware_setup,
+diff -urNp linux-2.6.30.8/arch/x86/kvm/x86.c linux-2.6.30.8/arch/x86/kvm/x86.c
+--- linux-2.6.30.8/arch/x86/kvm/x86.c 2009-09-09 17:37:32.667116417 -0400
++++ linux-2.6.30.8/arch/x86/kvm/x86.c 2009-09-09 17:37:50.199443574 -0400
+@@ -73,44 +73,44 @@ static int kvm_dev_ioctl_get_supported_c
+ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+ u32 function, u32 index);
+
+-struct kvm_x86_ops *kvm_x86_ops;
++const struct kvm_x86_ops *kvm_x86_ops;
+ EXPORT_SYMBOL_GPL(kvm_x86_ops);
+
+ struct kvm_stats_debugfs_item debugfs_entries[] = {
+- { "pf_fixed", VCPU_STAT(pf_fixed) },
+- { "pf_guest", VCPU_STAT(pf_guest) },
+- { "tlb_flush", VCPU_STAT(tlb_flush) },
+- { "invlpg", VCPU_STAT(invlpg) },
+- { "exits", VCPU_STAT(exits) },
+- { "io_exits", VCPU_STAT(io_exits) },
+- { "mmio_exits", VCPU_STAT(mmio_exits) },
+- { "signal_exits", VCPU_STAT(signal_exits) },
+- { "irq_window", VCPU_STAT(irq_window_exits) },
+- { "nmi_window", VCPU_STAT(nmi_window_exits) },
+- { "halt_exits", VCPU_STAT(halt_exits) },
+- { "halt_wakeup", VCPU_STAT(halt_wakeup) },
+- { "hypercalls", VCPU_STAT(hypercalls) },
+- { "request_irq", VCPU_STAT(request_irq_exits) },
+- { "request_nmi", VCPU_STAT(request_nmi_exits) },
+- { "irq_exits", VCPU_STAT(irq_exits) },
+- { "host_state_reload", VCPU_STAT(host_state_reload) },
+- { "efer_reload", VCPU_STAT(efer_reload) },
+- { "fpu_reload", VCPU_STAT(fpu_reload) },
+- { "insn_emulation", VCPU_STAT(insn_emulation) },
+- { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
+- { "irq_injections", VCPU_STAT(irq_injections) },
+- { "nmi_injections", VCPU_STAT(nmi_injections) },
+- { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
+- { "mmu_pte_write", VM_STAT(mmu_pte_write) },
+- { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
+- { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
+- { "mmu_flooded", VM_STAT(mmu_flooded) },
+- { "mmu_recycled", VM_STAT(mmu_recycled) },
+- { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
+- { "mmu_unsync", VM_STAT(mmu_unsync) },
+- { "mmu_unsync_global", VM_STAT(mmu_unsync_global) },
+- { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
+- { "largepages", VM_STAT(lpages) },
++ { "pf_fixed", VCPU_STAT(pf_fixed), NULL },
++ { "pf_guest", VCPU_STAT(pf_guest), NULL },
++ { "tlb_flush", VCPU_STAT(tlb_flush), NULL },
++ { "invlpg", VCPU_STAT(invlpg), NULL },
++ { "exits", VCPU_STAT(exits), NULL },
++ { "io_exits", VCPU_STAT(io_exits), NULL },
++ { "mmio_exits", VCPU_STAT(mmio_exits), NULL },
++ { "signal_exits", VCPU_STAT(signal_exits), NULL },
++ { "irq_window", VCPU_STAT(irq_window_exits), NULL },
++ { "nmi_window", VCPU_STAT(nmi_window_exits), NULL },
++ { "halt_exits", VCPU_STAT(halt_exits), NULL },
++ { "halt_wakeup", VCPU_STAT(halt_wakeup), NULL },
++ { "hypercalls", VCPU_STAT(hypercalls), NULL },
++ { "request_irq", VCPU_STAT(request_irq_exits), NULL },
++ { "request_nmi", VCPU_STAT(request_nmi_exits), NULL },
++ { "irq_exits", VCPU_STAT(irq_exits), NULL },
++ { "host_state_reload", VCPU_STAT(host_state_reload), NULL },
++ { "efer_reload", VCPU_STAT(efer_reload), NULL },
++ { "fpu_reload", VCPU_STAT(fpu_reload), NULL },
++ { "insn_emulation", VCPU_STAT(insn_emulation), NULL },
++ { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail), NULL },
++ { "irq_injections", VCPU_STAT(irq_injections), NULL },
++ { "nmi_injections", VCPU_STAT(nmi_injections), NULL },
++ { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped), NULL },
++ { "mmu_pte_write", VM_STAT(mmu_pte_write), NULL },
++ { "mmu_pte_updated", VM_STAT(mmu_pte_updated), NULL },
++ { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped), NULL },
++ { "mmu_flooded", VM_STAT(mmu_flooded), NULL },
++ { "mmu_recycled", VM_STAT(mmu_recycled), NULL },
++ { "mmu_cache_miss", VM_STAT(mmu_cache_miss), NULL },
++ { "mmu_unsync", VM_STAT(mmu_unsync), NULL },
++ { "mmu_unsync_global", VM_STAT(mmu_unsync_global), NULL },
++ { "remote_tlb_flush", VM_STAT(remote_tlb_flush), NULL },
++ { "largepages", VM_STAT(lpages), NULL },
+ { NULL }
+ };
+
+@@ -1462,7 +1462,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
+ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
+ {
+- if (irq->irq < 0 || irq->irq >= 256)
++ if (irq->irq >= 256)
+ return -EINVAL;
+ if (irqchip_in_kernel(vcpu->kvm))
+ return -ENXIO;
+@@ -2784,10 +2784,10 @@ static struct notifier_block kvmclock_cp
+ .notifier_call = kvmclock_cpufreq_notifier
+ };
+
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ int r, cpu;
+- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
++ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
+
+ if (kvm_x86_ops) {
+ printk(KERN_ERR "kvm: already loaded the other module\n");
+diff -urNp linux-2.6.30.8/arch/x86/lguest/Kconfig linux-2.6.30.8/arch/x86/lguest/Kconfig
+--- linux-2.6.30.8/arch/x86/lguest/Kconfig 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/lguest/Kconfig 2009-08-02 09:47:36.165378342 -0400
+@@ -3,6 +3,7 @@ config LGUEST_GUEST
+ select PARAVIRT
+ depends on X86_32
+ depends on !X86_PAE
++ depends on !PAX_KERNEXEC
+ select VIRTIO
+ select VIRTIO_RING
+ select VIRTIO_CONSOLE
+diff -urNp linux-2.6.30.8/arch/x86/lib/checksum_32.S linux-2.6.30.8/arch/x86/lib/checksum_32.S
+--- linux-2.6.30.8/arch/x86/lib/checksum_32.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/lib/checksum_32.S 2009-07-30 09:48:09.967600435 -0400
+@@ -28,7 +28,8 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+ #include <asm/errno.h>
+-
++#include <asm/segment.h>
++
+ /*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+@@ -304,9 +305,22 @@ unsigned int csum_partial_copy_generic (
+
+ #define ARGBASE 16
+ #define FP 12
+-
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+ CFI_STARTPROC
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %es
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp csum_partial_copy_generic
++
++ENTRY(csum_partial_copy_generic_from_user)
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %ds
++ CFI_ADJUST_CFA_OFFSET -4
++
++ENTRY(csum_partial_copy_generic)
+ subl $4,%esp
+ CFI_ADJUST_CFA_OFFSET 4
+ pushl %edi
+@@ -331,7 +345,7 @@ ENTRY(csum_partial_copy_generic)
+ jmp 4f
+ SRC(1: movw (%esi), %bx )
+ addl $2, %esi
+-DST( movw %bx, (%edi) )
++DST( movw %bx, %es:(%edi) )
+ addl $2, %edi
+ addw %bx, %ax
+ adcl $0, %eax
+@@ -343,30 +357,30 @@ DST( movw %bx, (%edi) )
+ SRC(1: movl (%esi), %ebx )
+ SRC( movl 4(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, (%edi) )
++DST( movl %ebx, %es:(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 4(%edi) )
++DST( movl %edx, %es:4(%edi) )
+
+ SRC( movl 8(%esi), %ebx )
+ SRC( movl 12(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 8(%edi) )
++DST( movl %ebx, %es:8(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 12(%edi) )
++DST( movl %edx, %es:12(%edi) )
+
+ SRC( movl 16(%esi), %ebx )
+ SRC( movl 20(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 16(%edi) )
++DST( movl %ebx, %es:16(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 20(%edi) )
++DST( movl %edx, %es:20(%edi) )
+
+ SRC( movl 24(%esi), %ebx )
+ SRC( movl 28(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 24(%edi) )
++DST( movl %ebx, %es:24(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 28(%edi) )
++DST( movl %edx, %es:28(%edi) )
+
+ lea 32(%esi), %esi
+ lea 32(%edi), %edi
+@@ -380,7 +394,7 @@ DST( movl %edx, 28(%edi) )
+ shrl $2, %edx # This clears CF
+ SRC(3: movl (%esi), %ebx )
+ adcl %ebx, %eax
+-DST( movl %ebx, (%edi) )
++DST( movl %ebx, %es:(%edi) )
+ lea 4(%esi), %esi
+ lea 4(%edi), %edi
+ dec %edx
+@@ -392,12 +406,12 @@ DST( movl %ebx, (%edi) )
+ jb 5f
+ SRC( movw (%esi), %cx )
+ leal 2(%esi), %esi
+-DST( movw %cx, (%edi) )
++DST( movw %cx, %es:(%edi) )
+ leal 2(%edi), %edi
+ je 6f
+ shll $16,%ecx
+ SRC(5: movb (%esi), %cl )
+-DST( movb %cl, (%edi) )
++DST( movb %cl, %es:(%edi) )
+ 6: addl %ecx, %eax
+ adcl $0, %eax
+ 7:
+@@ -408,7 +422,7 @@ DST( movb %cl, (%edi) )
+
+ 6001:
+ movl ARGBASE+20(%esp), %ebx # src_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+
+ # zero the complete destination - computing the rest
+ # is too much work
+@@ -421,11 +435,19 @@ DST( movb %cl, (%edi) )
+
+ 6002:
+ movl ARGBASE+24(%esp), %ebx # dst_err_ptr
+- movl $-EFAULT,(%ebx)
++ movl $-EFAULT,%ss:(%ebx)
+ jmp 5000b
+
+ .previous
+
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %ds
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %es
++ CFI_ADJUST_CFA_OFFSET -4
+ popl %ebx
+ CFI_ADJUST_CFA_OFFSET -4
+ CFI_RESTORE ebx
+@@ -439,26 +461,41 @@ DST( movb %cl, (%edi) )
+ CFI_ADJUST_CFA_OFFSET -4
+ ret
+ CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+
+ #else
+
+ /* Version for PentiumII/PPro */
+
+ #define ROUND1(x) \
++ nop; nop; nop; \
+ SRC(movl x(%esi), %ebx ) ; \
+ addl %ebx, %eax ; \
+- DST(movl %ebx, x(%edi) ) ;
++ DST(movl %ebx, %es:x(%edi)) ;
+
+ #define ROUND(x) \
++ nop; nop; nop; \
+ SRC(movl x(%esi), %ebx ) ; \
+ adcl %ebx, %eax ; \
+- DST(movl %ebx, x(%edi) ) ;
++ DST(movl %ebx, %es:x(%edi)) ;
+
+ #define ARGBASE 12
+-
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+ CFI_STARTPROC
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %es
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp csum_partial_copy_generic
++
++ENTRY(csum_partial_copy_generic_from_user)
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %ds
++ CFI_ADJUST_CFA_OFFSET -4
++
++ENTRY(csum_partial_copy_generic)
+ pushl %ebx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ebx, 0
+@@ -482,7 +519,7 @@ ENTRY(csum_partial_copy_generic)
+ subl %ebx, %edi
+ lea -1(%esi),%edx
+ andl $-32,%edx
+- lea 3f(%ebx,%ebx), %ebx
++ lea 3f(%ebx,%ebx,2), %ebx
+ testl %esi, %esi
+ jmp *%ebx
+ 1: addl $64,%esi
+@@ -503,19 +540,19 @@ ENTRY(csum_partial_copy_generic)
+ jb 5f
+ SRC( movw (%esi), %dx )
+ leal 2(%esi), %esi
+-DST( movw %dx, (%edi) )
++DST( movw %dx, %es:(%edi) )
+ leal 2(%edi), %edi
+ je 6f
+ shll $16,%edx
+ 5:
+ SRC( movb (%esi), %dl )
+-DST( movb %dl, (%edi) )
++DST( movb %dl, %es:(%edi) )
+ 6: addl %edx, %eax
+ adcl $0, %eax
+ 7:
+ .section .fixup, "ax"
+ 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+ # zero the complete destination (computing the rest is too much work)
+ movl ARGBASE+8(%esp),%edi # dst
+ movl ARGBASE+12(%esp),%ecx # len
+@@ -523,10 +560,18 @@ DST( movb %dl, (%edi) )
+ rep; stosb
+ jmp 7b
+ 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+ jmp 7b
+ .previous
+
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %ds
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ popl %es
++ CFI_ADJUST_CFA_OFFSET -4
+ popl %esi
+ CFI_ADJUST_CFA_OFFSET -4
+ CFI_RESTORE esi
+@@ -538,7 +583,7 @@ DST( movb %dl, (%edi) )
+ CFI_RESTORE ebx
+ ret
+ CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+
+ #undef ROUND
+ #undef ROUND1
+diff -urNp linux-2.6.30.8/arch/x86/lib/clear_page_64.S linux-2.6.30.8/arch/x86/lib/clear_page_64.S
+--- linux-2.6.30.8/arch/x86/lib/clear_page_64.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/lib/clear_page_64.S 2009-07-30 09:48:09.967600435 -0400
+@@ -44,7 +44,7 @@ ENDPROC(clear_page)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
+ 2:
+diff -urNp linux-2.6.30.8/arch/x86/lib/copy_page_64.S linux-2.6.30.8/arch/x86/lib/copy_page_64.S
+--- linux-2.6.30.8/arch/x86/lib/copy_page_64.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/lib/copy_page_64.S 2009-07-30 09:48:09.967600435 -0400
+@@ -104,7 +104,7 @@ ENDPROC(copy_page)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
+ 2:
+diff -urNp linux-2.6.30.8/arch/x86/lib/copy_user_64.S linux-2.6.30.8/arch/x86/lib/copy_user_64.S
+--- linux-2.6.30.8/arch/x86/lib/copy_user_64.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/lib/copy_user_64.S 2009-07-30 09:48:09.967600435 -0400
+@@ -21,7 +21,7 @@
+ .byte 0xe9 /* 32bit jump */
+ .long \orig-1f /* by default jump to orig */
+ 1:
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 2: .byte 0xe9 /* near jump with 32bit immediate */
+ .long \alt-1b /* offset */ /* or alternatively to alt */
+ .previous
+@@ -64,31 +64,6 @@
+ #endif
+ .endm
+
+-/* Standard copy_to_user with segment limit checking */
+-ENTRY(copy_to_user)
+- CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rdi,%rcx
+- addq %rdx,%rcx
+- jc bad_to_user
+- cmpq TI_addr_limit(%rax),%rcx
+- jae bad_to_user
+- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+- CFI_ENDPROC
+-
+-/* Standard copy_from_user with segment limit checking */
+-ENTRY(copy_from_user)
+- CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rsi,%rcx
+- addq %rdx,%rcx
+- jc bad_from_user
+- cmpq TI_addr_limit(%rax),%rcx
+- jae bad_from_user
+- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+- CFI_ENDPROC
+-ENDPROC(copy_from_user)
+-
+ ENTRY(copy_user_generic)
+ CFI_STARTPROC
+ ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+@@ -106,6 +81,8 @@ ENDPROC(__copy_from_user_inatomic)
+ ENTRY(bad_from_user)
+ bad_from_user:
+ CFI_STARTPROC
++ testl %edx,%edx
++ js bad_to_user
+ movl %edx,%ecx
+ xorl %eax,%eax
+ rep
+diff -urNp linux-2.6.30.8/arch/x86/lib/getuser.S linux-2.6.30.8/arch/x86/lib/getuser.S
+--- linux-2.6.30.8/arch/x86/lib/getuser.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/lib/getuser.S 2009-07-30 09:48:09.967600435 -0400
+@@ -33,6 +33,7 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm.h>
++#include <asm/segment.h>
+
+ .text
+ ENTRY(__get_user_1)
+@@ -40,7 +41,19 @@ ENTRY(__get_user_1)
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 1: movzb (%_ASM_AX),%edx
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ pop %ds
++#endif
++
+ xor %eax,%eax
+ ret
+ CFI_ENDPROC
+@@ -53,7 +66,19 @@ ENTRY(__get_user_2)
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 2: movzwl -1(%_ASM_AX),%edx
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ pop %ds
++#endif
++
+ xor %eax,%eax
+ ret
+ CFI_ENDPROC
+@@ -66,7 +91,19 @@ ENTRY(__get_user_4)
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 3: mov -3(%_ASM_AX),%edx
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ pop %ds
++#endif
++
+ xor %eax,%eax
+ ret
+ CFI_ENDPROC
+@@ -89,6 +126,12 @@ ENDPROC(__get_user_8)
+
+ bad_get_user:
+ CFI_STARTPROC
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ pop %ds
++#endif
++
+ xor %edx,%edx
+ mov $(-EFAULT),%_ASM_AX
+ ret
+diff -urNp linux-2.6.30.8/arch/x86/lib/memcpy_64.S linux-2.6.30.8/arch/x86/lib/memcpy_64.S
+--- linux-2.6.30.8/arch/x86/lib/memcpy_64.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/lib/memcpy_64.S 2009-07-30 09:48:09.968548540 -0400
+@@ -128,7 +128,7 @@ ENDPROC(__memcpy)
+ * It is also a lot simpler. Use this when possible:
+ */
+
+- .section .altinstr_replacement, "ax"
++ .section .altinstr_replacement, "a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
+ 2:
+diff -urNp linux-2.6.30.8/arch/x86/lib/memset_64.S linux-2.6.30.8/arch/x86/lib/memset_64.S
+--- linux-2.6.30.8/arch/x86/lib/memset_64.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/lib/memset_64.S 2009-07-30 09:48:09.968548540 -0400
+@@ -118,7 +118,7 @@ ENDPROC(__memset)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (memset_c - memset) - (2f - 1b) /* offset */
+ 2:
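
The one-character section-flag change repeated in the clear_page_64.S, copy_page_64.S, copy_user_64.S, memcpy_64.S and memset_64.S hunks above ("ax" -> "a") drops the executable bit from .altinstr_replacement. The bytes in that section are only ever read by the alternatives framework, which copies them over the original call site at boot; control flow never enters the section itself, so under KERNEXEC it can be mapped non-executable. A hedged, self-contained sketch of the mechanism (names are illustrative, not the exact arch/x86/kernel/alternative.c API):

    #include <string.h>

    /* One alternatives record: a patch site plus replacement bytes that
     * live in .altinstr_replacement ("a" = allocatable, not executable). */
    struct alt_sketch {
            unsigned char *instr;             /* patched in place */
            const unsigned char *replacement; /* read as data only */
            unsigned char len;
    };

    static void apply_alternative_sketch(struct alt_sketch *a, int cpu_has_feature)
    {
            if (cpu_has_feature)
                    memcpy(a->instr, a->replacement, a->len);
    }
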
+diff -urNp linux-2.6.30.8/arch/x86/lib/mmx_32.c linux-2.6.30.8/arch/x86/lib/mmx_32.c
+--- linux-2.6.30.8/arch/x86/lib/mmx_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/lib/mmx_32.c 2009-07-30 09:48:09.968548540 -0400
+@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
+ {
+ void *p;
+ int i;
++ unsigned long cr0;
+
+ if (unlikely(in_interrupt()))
+ return __memcpy(to, from, len);
+@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+- "1: prefetch (%0)\n" /* This set is 28 bytes */
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n" /* This set is 28 bytes */
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from));
++ : "=&r" (cr0) : "r" (from) : "ax");
+
+ for ( ; i > 5; i--) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movq 8(%0), %%mm1\n"
+- " movq 16(%0), %%mm2\n"
+- " movq 24(%0), %%mm3\n"
+- " movq %%mm0, (%1)\n"
+- " movq %%mm1, 8(%1)\n"
+- " movq %%mm2, 16(%1)\n"
+- " movq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm0\n"
+- " movq 40(%0), %%mm1\n"
+- " movq 48(%0), %%mm2\n"
+- " movq 56(%0), %%mm3\n"
+- " movq %%mm0, 32(%1)\n"
+- " movq %%mm1, 40(%1)\n"
+- " movq %%mm2, 48(%1)\n"
+- " movq %%mm3, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movq 8(%1), %%mm1\n"
++ " movq 16(%1), %%mm2\n"
++ " movq 24(%1), %%mm3\n"
++ " movq %%mm0, (%2)\n"
++ " movq %%mm1, 8(%2)\n"
++ " movq %%mm2, 16(%2)\n"
++ " movq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm0\n"
++ " movq 40(%1), %%mm1\n"
++ " movq 48(%1), %%mm2\n"
++ " movq 56(%1), %%mm3\n"
++ " movq %%mm0, 32(%2)\n"
++ " movq %%mm1, 40(%2)\n"
++ " movq %%mm2, 48(%2)\n"
++ " movq %%mm3, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from), "r" (to) : "memory");
++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+ int i;
++ unsigned long cr0;
+
+ kernel_fpu_begin();
+
+@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
+ * but that is for later. -AV
+ */
+ __asm__ __volatile__(
+- "1: prefetch (%0)\n"
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n"
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from));
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+
+ for (i = 0; i < (4096-320)/64; i++) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movntq %%mm0, (%1)\n"
+- " movq 8(%0), %%mm1\n"
+- " movntq %%mm1, 8(%1)\n"
+- " movq 16(%0), %%mm2\n"
+- " movntq %%mm2, 16(%1)\n"
+- " movq 24(%0), %%mm3\n"
+- " movntq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm4\n"
+- " movntq %%mm4, 32(%1)\n"
+- " movq 40(%0), %%mm5\n"
+- " movntq %%mm5, 40(%1)\n"
+- " movq 48(%0), %%mm6\n"
+- " movntq %%mm6, 48(%1)\n"
+- " movq 56(%0), %%mm7\n"
+- " movntq %%mm7, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movntq %%mm0, (%2)\n"
++ " movq 8(%1), %%mm1\n"
++ " movntq %%mm1, 8(%2)\n"
++ " movq 16(%1), %%mm2\n"
++ " movntq %%mm2, 16(%2)\n"
++ " movq 24(%1), %%mm3\n"
++ " movntq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm4\n"
++ " movntq %%mm4, 32(%2)\n"
++ " movq 40(%1), %%mm5\n"
++ " movntq %%mm5, 40(%2)\n"
++ " movq 48(%1), %%mm6\n"
++ " movntq %%mm6, 48(%2)\n"
++ " movq 56(%1), %%mm7\n"
++ " movntq %%mm7, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+ int i;
++ unsigned long cr0;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+- "1: prefetch (%0)\n"
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n"
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from));
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+
+ for (i = 0; i < 4096/64; i++) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movq 8(%0), %%mm1\n"
+- " movq 16(%0), %%mm2\n"
+- " movq 24(%0), %%mm3\n"
+- " movq %%mm0, (%1)\n"
+- " movq %%mm1, 8(%1)\n"
+- " movq %%mm2, 16(%1)\n"
+- " movq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm0\n"
+- " movq 40(%0), %%mm1\n"
+- " movq 48(%0), %%mm2\n"
+- " movq 56(%0), %%mm3\n"
+- " movq %%mm0, 32(%1)\n"
+- " movq %%mm1, 40(%1)\n"
+- " movq %%mm2, 48(%1)\n"
+- " movq %%mm3, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movq 8(%1), %%mm1\n"
++ " movq 16(%1), %%mm2\n"
++ " movq 24(%1), %%mm3\n"
++ " movq %%mm0, (%2)\n"
++ " movq %%mm1, 8(%2)\n"
++ " movq %%mm2, 16(%2)\n"
++ " movq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm0\n"
++ " movq 40(%1), %%mm1\n"
++ " movq 48(%1), %%mm2\n"
++ " movq 56(%1), %%mm3\n"
++ " movq %%mm0, 32(%2)\n"
++ " movq %%mm1, 40(%2)\n"
++ " movq %%mm2, 48(%2)\n"
++ " movq %%mm3, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from), "r" (to) : "memory");
++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
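
Every fixup in the mmx_32.c hunks above self-patches kernel text: "movw $0x1AEB, 1b" rewrites the faulting prefetch at label 1 into a short jmp over 26 bytes. With PAX_KERNEXEC that text is read-only even to ring 0, enforced by the WP bit (bit 16) of CR0, which is why each fixup now saves CR0, clears WP with "andl $0xFFFEFFFF, %%eax", performs the store, and restores the saved value; the new "=&r"(cr0) output and "ax" clobber give the compiler-visible scratch state this requires. A hedged sketch of the open/close pair (illustrative names):

    /* CR0.WP = bit 16 (0x10000). Clearing it lets ring 0 write through
     * read-only pages; restoring the saved CR0 re-arms the protection. */
    static inline unsigned long kernexec_open_sketch(void)
    {
            unsigned long cr0;
            asm volatile("movl %%cr0, %0" : "=r"(cr0));
            asm volatile("movl %0, %%cr0" : : "r"(cr0 & ~0x10000UL));
            return cr0;
    }

    static inline void kernexec_close_sketch(unsigned long cr0)
    {
            asm volatile("movl %0, %%cr0" : : "r"(cr0));
    }
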
+diff -urNp linux-2.6.30.8/arch/x86/lib/putuser.S linux-2.6.30.8/arch/x86/lib/putuser.S
+--- linux-2.6.30.8/arch/x86/lib/putuser.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/lib/putuser.S 2009-07-30 09:48:09.969494268 -0400
+@@ -15,6 +15,7 @@
+ #include <asm/thread_info.h>
+ #include <asm/errno.h>
+ #include <asm/asm.h>
++#include <asm/segment.h>
+
+
+ /*
+@@ -39,7 +40,19 @@ ENTRY(__put_user_1)
+ ENTER
+ cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
+ jae bad_put_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 1: movb %al,(%_ASM_CX)
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ popl %ds
++#endif
++
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_1)
+@@ -50,7 +63,19 @@ ENTRY(__put_user_2)
+ sub $1,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 2: movw %ax,(%_ASM_CX)
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ popl %ds
++#endif
++
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_2)
+@@ -61,7 +86,19 @@ ENTRY(__put_user_4)
+ sub $3,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 3: movl %eax,(%_ASM_CX)
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ popl %ds
++#endif
++
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_4)
+@@ -72,16 +109,34 @@ ENTRY(__put_user_8)
+ sub $7,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
++
++#ifdef CONFIG_X86_32
++ pushl $(__USER_DS)
++ popl %ds
++#endif
++
+ 4: mov %_ASM_AX,(%_ASM_CX)
+ #ifdef CONFIG_X86_32
+ 5: movl %edx,4(%_ASM_CX)
+ #endif
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ popl %ds
++#endif
++
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_8)
+
+ bad_put_user:
+ CFI_STARTPROC
++
++#ifdef CONFIG_X86_32
++ pushl %ss
++ popl %ds
++#endif
++
+ movl $-EFAULT,%eax
+ EXIT
+ END(bad_put_user)
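
The getuser.S/putuser.S changes above apply the UDEREF segment dance to the __get_user/__put_user fast paths on 32-bit: load __USER_DS into %ds just before the user access, then restore the kernel segment with "pushl %ss; popl %ds" (%ss always holds the kernel data selector, making this a compact, interrupt-safe restore). The bad_get_user/bad_put_user error paths repeat the restore because the fault is taken while %ds still points at userland. A hedged inline-asm sketch of the read side (the selector is passed in; the function name is illustrative):

    static inline unsigned char get_user_byte_sketch(const unsigned char *uaddr,
                                                     unsigned int user_ds_sel)
    {
            unsigned int v;
            asm volatile("pushl %1\n\t"
                         "popl  %%ds\n\t"          /* enter user segment */
                         "movzbl (%2), %0\n\t"     /* %ds-relative load */
                         "pushl %%ss\n\t"          /* %ss = kernel data */
                         "popl  %%ds"
                         : "=&r"(v)
                         : "r"(user_ds_sel), "r"(uaddr)
                         : "memory");
            return (unsigned char)v;
    }
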
+diff -urNp linux-2.6.30.8/arch/x86/lib/usercopy_32.c linux-2.6.30.8/arch/x86/lib/usercopy_32.c
+--- linux-2.6.30.8/arch/x86/lib/usercopy_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/lib/usercopy_32.c 2009-07-30 09:48:09.969494268 -0400
+@@ -36,31 +36,38 @@ static inline int __movsl_is_ok(unsigned
+ * Copy a null terminated string from userspace.
+ */
+
+-#define __do_strncpy_from_user(dst, src, count, res) \
+-do { \
+- int __d0, __d1, __d2; \
+- might_fault(); \
+- __asm__ __volatile__( \
+- " testl %1,%1\n" \
+- " jz 2f\n" \
+- "0: lodsb\n" \
+- " stosb\n" \
+- " testb %%al,%%al\n" \
+- " jz 1f\n" \
+- " decl %1\n" \
+- " jnz 0b\n" \
+- "1: subl %1,%0\n" \
+- "2:\n" \
+- ".section .fixup,\"ax\"\n" \
+- "3: movl %5,%0\n" \
+- " jmp 2b\n" \
+- ".previous\n" \
+- _ASM_EXTABLE(0b,3b) \
+- : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
+- "=&D" (__d2) \
+- : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
+- : "memory"); \
+-} while (0)
++static long __do_strncpy_from_user(char *dst, const char __user *src, long count)
++{
++ int __d0, __d1, __d2;
++ long res = -EFAULT;
++
++ might_fault();
++ __asm__ __volatile__(
++ " movw %w10,%%ds\n"
++ " testl %1,%1\n"
++ " jz 2f\n"
++ "0: lodsb\n"
++ " stosb\n"
++ " testb %%al,%%al\n"
++ " jz 1f\n"
++ " decl %1\n"
++ " jnz 0b\n"
++ "1: subl %1,%0\n"
++ "2:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
++ ".section .fixup,\"ax\"\n"
++ "3: movl %5,%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ _ASM_EXTABLE(0b,3b)
++ : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),
++ "=&D" (__d2)
++ : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst),
++ "r"(__USER_DS)
++ : "memory");
++ return res;
++}
+
+ /**
+ * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
+@@ -85,9 +92,7 @@ do { \
+ long
+ __strncpy_from_user(char *dst, const char __user *src, long count)
+ {
+- long res;
+- __do_strncpy_from_user(dst, src, count, res);
+- return res;
++ return __do_strncpy_from_user(dst, src, count);
+ }
+ EXPORT_SYMBOL(__strncpy_from_user);
+
+@@ -114,7 +119,7 @@ strncpy_from_user(char *dst, const char
+ {
+ long res = -EFAULT;
+ if (access_ok(VERIFY_READ, src, 1))
+- __do_strncpy_from_user(dst, src, count, res);
++ res = __do_strncpy_from_user(dst, src, count);
+ return res;
+ }
+ EXPORT_SYMBOL(strncpy_from_user);
+@@ -123,24 +128,30 @@ EXPORT_SYMBOL(strncpy_from_user);
+ * Zero Userspace
+ */
+
+-#define __do_clear_user(addr,size) \
+-do { \
+- int __d0; \
+- might_fault(); \
+- __asm__ __volatile__( \
+- "0: rep; stosl\n" \
+- " movl %2,%0\n" \
+- "1: rep; stosb\n" \
+- "2:\n" \
+- ".section .fixup,\"ax\"\n" \
+- "3: lea 0(%2,%0,4),%0\n" \
+- " jmp 2b\n" \
+- ".previous\n" \
+- _ASM_EXTABLE(0b,3b) \
+- _ASM_EXTABLE(1b,2b) \
+- : "=&c"(size), "=&D" (__d0) \
+- : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
+-} while (0)
++static unsigned long __do_clear_user(void __user *addr, unsigned long size)
++{
++ int __d0;
++
++ might_fault();
++ __asm__ __volatile__(
++ " movw %w6,%%es\n"
++ "0: rep; stosl\n"
++ " movl %2,%0\n"
++ "1: rep; stosb\n"
++ "2:\n"
++ " pushl %%ss\n"
++ " popl %%es\n"
++ ".section .fixup,\"ax\"\n"
++ "3: lea 0(%2,%0,4),%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ _ASM_EXTABLE(0b,3b)
++ _ASM_EXTABLE(1b,2b)
++ : "=&c"(size), "=&D" (__d0)
++ : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0),
++ "r"(__USER_DS));
++ return size;
++}
+
+ /**
+ * clear_user: - Zero a block of memory in user space.
+@@ -157,7 +168,7 @@ clear_user(void __user *to, unsigned lon
+ {
+ might_fault();
+ if (access_ok(VERIFY_WRITE, to, n))
+- __do_clear_user(to, n);
++ n = __do_clear_user(to, n);
+ return n;
+ }
+ EXPORT_SYMBOL(clear_user);
+@@ -176,8 +187,7 @@ EXPORT_SYMBOL(clear_user);
+ unsigned long
+ __clear_user(void __user *to, unsigned long n)
+ {
+- __do_clear_user(to, n);
+- return n;
++ return __do_clear_user(to, n);
+ }
+ EXPORT_SYMBOL(__clear_user);
+
+@@ -200,14 +210,17 @@ long strnlen_user(const char __user *s,
+ might_fault();
+
+ __asm__ __volatile__(
++ " movw %w8,%%es\n"
+ " testl %0, %0\n"
+ " jz 3f\n"
+- " andl %0,%%ecx\n"
++ " movl %0,%%ecx\n"
+ "0: repne; scasb\n"
+ " setne %%al\n"
+ " subl %%ecx,%0\n"
+ " addl %0,%%eax\n"
+ "1:\n"
++ " pushl %%ss\n"
++ " popl %%es\n"
+ ".section .fixup,\"ax\"\n"
+ "2: xorl %%eax,%%eax\n"
+ " jmp 1b\n"
+@@ -219,7 +232,7 @@ long strnlen_user(const char __user *s,
+ " .long 0b,2b\n"
+ ".previous"
+ :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
+- :"0" (n), "1" (s), "2" (0), "3" (mask)
++ :"0" (n), "1" (s), "2" (0), "3" (mask), "r" (__USER_DS)
+ :"cc");
+ return res & mask;
+ }
+@@ -227,10 +240,121 @@ EXPORT_SYMBOL(strnlen_user);
+
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+ static unsigned long
+-__copy_user_intel(void __user *to, const void *from, unsigned long size)
++__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
++{
++ int d0, d1;
++ __asm__ __volatile__(
++ " movw %w6, %%es\n"
++ " .align 2,0x90\n"
++ "1: movl 32(%4), %%eax\n"
++ " cmpl $67, %0\n"
++ " jbe 3f\n"
++ "2: movl 64(%4), %%eax\n"
++ " .align 2,0x90\n"
++ "3: movl 0(%4), %%eax\n"
++ "4: movl 4(%4), %%edx\n"
++ "5: movl %%eax, %%es:0(%3)\n"
++ "6: movl %%edx, %%es:4(%3)\n"
++ "7: movl 8(%4), %%eax\n"
++ "8: movl 12(%4),%%edx\n"
++ "9: movl %%eax, %%es:8(%3)\n"
++ "10: movl %%edx, %%es:12(%3)\n"
++ "11: movl 16(%4), %%eax\n"
++ "12: movl 20(%4), %%edx\n"
++ "13: movl %%eax, %%es:16(%3)\n"
++ "14: movl %%edx, %%es:20(%3)\n"
++ "15: movl 24(%4), %%eax\n"
++ "16: movl 28(%4), %%edx\n"
++ "17: movl %%eax, %%es:24(%3)\n"
++ "18: movl %%edx, %%es:28(%3)\n"
++ "19: movl 32(%4), %%eax\n"
++ "20: movl 36(%4), %%edx\n"
++ "21: movl %%eax, %%es:32(%3)\n"
++ "22: movl %%edx, %%es:36(%3)\n"
++ "23: movl 40(%4), %%eax\n"
++ "24: movl 44(%4), %%edx\n"
++ "25: movl %%eax, %%es:40(%3)\n"
++ "26: movl %%edx, %%es:44(%3)\n"
++ "27: movl 48(%4), %%eax\n"
++ "28: movl 52(%4), %%edx\n"
++ "29: movl %%eax, %%es:48(%3)\n"
++ "30: movl %%edx, %%es:52(%3)\n"
++ "31: movl 56(%4), %%eax\n"
++ "32: movl 60(%4), %%edx\n"
++ "33: movl %%eax, %%es:56(%3)\n"
++ "34: movl %%edx, %%es:60(%3)\n"
++ " addl $-64, %0\n"
++ " addl $64, %4\n"
++ " addl $64, %3\n"
++ " cmpl $63, %0\n"
++ " ja 1b\n"
++ "35: movl %0, %%eax\n"
++ " shrl $2, %0\n"
++ " andl $3, %%eax\n"
++ " cld\n"
++ "99: rep; movsl\n"
++ "36: movl %%eax, %0\n"
++ "37: rep; movsb\n"
++ "100:\n"
++ " pushl %%ss\n"
++ " popl %%es\n"
++ ".section .fixup,\"ax\"\n"
++ "101: lea 0(%%eax,%0,4),%0\n"
++ " jmp 100b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,100b\n"
++ " .long 2b,100b\n"
++ " .long 3b,100b\n"
++ " .long 4b,100b\n"
++ " .long 5b,100b\n"
++ " .long 6b,100b\n"
++ " .long 7b,100b\n"
++ " .long 8b,100b\n"
++ " .long 9b,100b\n"
++ " .long 10b,100b\n"
++ " .long 11b,100b\n"
++ " .long 12b,100b\n"
++ " .long 13b,100b\n"
++ " .long 14b,100b\n"
++ " .long 15b,100b\n"
++ " .long 16b,100b\n"
++ " .long 17b,100b\n"
++ " .long 18b,100b\n"
++ " .long 19b,100b\n"
++ " .long 20b,100b\n"
++ " .long 21b,100b\n"
++ " .long 22b,100b\n"
++ " .long 23b,100b\n"
++ " .long 24b,100b\n"
++ " .long 25b,100b\n"
++ " .long 26b,100b\n"
++ " .long 27b,100b\n"
++ " .long 28b,100b\n"
++ " .long 29b,100b\n"
++ " .long 30b,100b\n"
++ " .long 31b,100b\n"
++ " .long 32b,100b\n"
++ " .long 33b,100b\n"
++ " .long 34b,100b\n"
++ " .long 35b,100b\n"
++ " .long 36b,100b\n"
++ " .long 37b,100b\n"
++ " .long 99b,101b\n"
++ ".previous"
++ : "=&c"(size), "=&D" (d0), "=&S" (d1)
++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
++ : "eax", "edx", "memory");
++ return size;
++}
++
++static unsigned long
++__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
+ {
+ int d0, d1;
+ __asm__ __volatile__(
++ " movw %w6, %%ds\n"
+ " .align 2,0x90\n"
+ "1: movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+@@ -239,36 +363,36 @@ __copy_user_intel(void __user *to, const
+ " .align 2,0x90\n"
+ "3: movl 0(%4), %%eax\n"
+ "4: movl 4(%4), %%edx\n"
+- "5: movl %%eax, 0(%3)\n"
+- "6: movl %%edx, 4(%3)\n"
++ "5: movl %%eax, %%es:0(%3)\n"
++ "6: movl %%edx, %%es:4(%3)\n"
+ "7: movl 8(%4), %%eax\n"
+ "8: movl 12(%4),%%edx\n"
+- "9: movl %%eax, 8(%3)\n"
+- "10: movl %%edx, 12(%3)\n"
++ "9: movl %%eax, %%es:8(%3)\n"
++ "10: movl %%edx, %%es:12(%3)\n"
+ "11: movl 16(%4), %%eax\n"
+ "12: movl 20(%4), %%edx\n"
+- "13: movl %%eax, 16(%3)\n"
+- "14: movl %%edx, 20(%3)\n"
++ "13: movl %%eax, %%es:16(%3)\n"
++ "14: movl %%edx, %%es:20(%3)\n"
+ "15: movl 24(%4), %%eax\n"
+ "16: movl 28(%4), %%edx\n"
+- "17: movl %%eax, 24(%3)\n"
+- "18: movl %%edx, 28(%3)\n"
++ "17: movl %%eax, %%es:24(%3)\n"
++ "18: movl %%edx, %%es:28(%3)\n"
+ "19: movl 32(%4), %%eax\n"
+ "20: movl 36(%4), %%edx\n"
+- "21: movl %%eax, 32(%3)\n"
+- "22: movl %%edx, 36(%3)\n"
++ "21: movl %%eax, %%es:32(%3)\n"
++ "22: movl %%edx, %%es:36(%3)\n"
+ "23: movl 40(%4), %%eax\n"
+ "24: movl 44(%4), %%edx\n"
+- "25: movl %%eax, 40(%3)\n"
+- "26: movl %%edx, 44(%3)\n"
++ "25: movl %%eax, %%es:40(%3)\n"
++ "26: movl %%edx, %%es:44(%3)\n"
+ "27: movl 48(%4), %%eax\n"
+ "28: movl 52(%4), %%edx\n"
+- "29: movl %%eax, 48(%3)\n"
+- "30: movl %%edx, 52(%3)\n"
++ "29: movl %%eax, %%es:48(%3)\n"
++ "30: movl %%edx, %%es:52(%3)\n"
+ "31: movl 56(%4), %%eax\n"
+ "32: movl 60(%4), %%edx\n"
+- "33: movl %%eax, 56(%3)\n"
+- "34: movl %%edx, 60(%3)\n"
++ "33: movl %%eax, %%es:56(%3)\n"
++ "34: movl %%edx, %%es:60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+@@ -282,6 +406,8 @@ __copy_user_intel(void __user *to, const
+ "36: movl %%eax, %0\n"
+ "37: rep; movsb\n"
+ "100:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
+ ".section .fixup,\"ax\"\n"
+ "101: lea 0(%%eax,%0,4),%0\n"
+ " jmp 100b\n"
+@@ -328,7 +454,7 @@ __copy_user_intel(void __user *to, const
+ " .long 99b,101b\n"
+ ".previous"
+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
+- : "1"(to), "2"(from), "0"(size)
++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
+ : "eax", "edx", "memory");
+ return size;
+ }
+@@ -338,6 +464,7 @@ __copy_user_zeroing_intel(void *to, cons
+ {
+ int d0, d1;
+ __asm__ __volatile__(
++ " movw %w6, %%ds\n"
+ " .align 2,0x90\n"
+ "0: movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+@@ -346,36 +473,36 @@ __copy_user_zeroing_intel(void *to, cons
+ " .align 2,0x90\n"
+ "2: movl 0(%4), %%eax\n"
+ "21: movl 4(%4), %%edx\n"
+- " movl %%eax, 0(%3)\n"
+- " movl %%edx, 4(%3)\n"
++ " movl %%eax, %%es:0(%3)\n"
++ " movl %%edx, %%es:4(%3)\n"
+ "3: movl 8(%4), %%eax\n"
+ "31: movl 12(%4),%%edx\n"
+- " movl %%eax, 8(%3)\n"
+- " movl %%edx, 12(%3)\n"
++ " movl %%eax, %%es:8(%3)\n"
++ " movl %%edx, %%es:12(%3)\n"
+ "4: movl 16(%4), %%eax\n"
+ "41: movl 20(%4), %%edx\n"
+- " movl %%eax, 16(%3)\n"
+- " movl %%edx, 20(%3)\n"
++ " movl %%eax, %%es:16(%3)\n"
++ " movl %%edx, %%es:20(%3)\n"
+ "10: movl 24(%4), %%eax\n"
+ "51: movl 28(%4), %%edx\n"
+- " movl %%eax, 24(%3)\n"
+- " movl %%edx, 28(%3)\n"
++ " movl %%eax, %%es:24(%3)\n"
++ " movl %%edx, %%es:28(%3)\n"
+ "11: movl 32(%4), %%eax\n"
+ "61: movl 36(%4), %%edx\n"
+- " movl %%eax, 32(%3)\n"
+- " movl %%edx, 36(%3)\n"
++ " movl %%eax, %%es:32(%3)\n"
++ " movl %%edx, %%es:36(%3)\n"
+ "12: movl 40(%4), %%eax\n"
+ "71: movl 44(%4), %%edx\n"
+- " movl %%eax, 40(%3)\n"
+- " movl %%edx, 44(%3)\n"
++ " movl %%eax, %%es:40(%3)\n"
++ " movl %%edx, %%es:44(%3)\n"
+ "13: movl 48(%4), %%eax\n"
+ "81: movl 52(%4), %%edx\n"
+- " movl %%eax, 48(%3)\n"
+- " movl %%edx, 52(%3)\n"
++ " movl %%eax, %%es:48(%3)\n"
++ " movl %%edx, %%es:52(%3)\n"
+ "14: movl 56(%4), %%eax\n"
+ "91: movl 60(%4), %%edx\n"
+- " movl %%eax, 56(%3)\n"
+- " movl %%edx, 60(%3)\n"
++ " movl %%eax, %%es:56(%3)\n"
++ " movl %%edx, %%es:60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+@@ -389,6 +516,8 @@ __copy_user_zeroing_intel(void *to, cons
+ " movl %%eax,%0\n"
+ "7: rep; movsb\n"
+ "8:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+ "16: pushl %0\n"
+@@ -423,7 +552,7 @@ __copy_user_zeroing_intel(void *to, cons
+ " .long 7b,16b\n"
+ ".previous"
+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
+- : "1"(to), "2"(from), "0"(size)
++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
+ : "eax", "edx", "memory");
+ return size;
+ }
+@@ -439,6 +568,7 @@ static unsigned long __copy_user_zeroing
+ int d0, d1;
+
+ __asm__ __volatile__(
++ " movw %w6, %%ds\n"
+ " .align 2,0x90\n"
+ "0: movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+@@ -447,36 +577,36 @@ static unsigned long __copy_user_zeroing
+ " .align 2,0x90\n"
+ "2: movl 0(%4), %%eax\n"
+ "21: movl 4(%4), %%edx\n"
+- " movnti %%eax, 0(%3)\n"
+- " movnti %%edx, 4(%3)\n"
++ " movnti %%eax, %%es:0(%3)\n"
++ " movnti %%edx, %%es:4(%3)\n"
+ "3: movl 8(%4), %%eax\n"
+ "31: movl 12(%4),%%edx\n"
+- " movnti %%eax, 8(%3)\n"
+- " movnti %%edx, 12(%3)\n"
++ " movnti %%eax, %%es:8(%3)\n"
++ " movnti %%edx, %%es:12(%3)\n"
+ "4: movl 16(%4), %%eax\n"
+ "41: movl 20(%4), %%edx\n"
+- " movnti %%eax, 16(%3)\n"
+- " movnti %%edx, 20(%3)\n"
++ " movnti %%eax, %%es:16(%3)\n"
++ " movnti %%edx, %%es:20(%3)\n"
+ "10: movl 24(%4), %%eax\n"
+ "51: movl 28(%4), %%edx\n"
+- " movnti %%eax, 24(%3)\n"
+- " movnti %%edx, 28(%3)\n"
++ " movnti %%eax, %%es:24(%3)\n"
++ " movnti %%edx, %%es:28(%3)\n"
+ "11: movl 32(%4), %%eax\n"
+ "61: movl 36(%4), %%edx\n"
+- " movnti %%eax, 32(%3)\n"
+- " movnti %%edx, 36(%3)\n"
++ " movnti %%eax, %%es:32(%3)\n"
++ " movnti %%edx, %%es:36(%3)\n"
+ "12: movl 40(%4), %%eax\n"
+ "71: movl 44(%4), %%edx\n"
+- " movnti %%eax, 40(%3)\n"
+- " movnti %%edx, 44(%3)\n"
++ " movnti %%eax, %%es:40(%3)\n"
++ " movnti %%edx, %%es:44(%3)\n"
+ "13: movl 48(%4), %%eax\n"
+ "81: movl 52(%4), %%edx\n"
+- " movnti %%eax, 48(%3)\n"
+- " movnti %%edx, 52(%3)\n"
++ " movnti %%eax, %%es:48(%3)\n"
++ " movnti %%edx, %%es:52(%3)\n"
+ "14: movl 56(%4), %%eax\n"
+ "91: movl 60(%4), %%edx\n"
+- " movnti %%eax, 56(%3)\n"
+- " movnti %%edx, 60(%3)\n"
++ " movnti %%eax, %%es:56(%3)\n"
++ " movnti %%edx, %%es:60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+@@ -491,6 +621,8 @@ static unsigned long __copy_user_zeroing
+ " movl %%eax,%0\n"
+ "7: rep; movsb\n"
+ "8:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+ "16: pushl %0\n"
+@@ -525,7 +657,7 @@ static unsigned long __copy_user_zeroing
+ " .long 7b,16b\n"
+ ".previous"
+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
+- : "1"(to), "2"(from), "0"(size)
++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
+ : "eax", "edx", "memory");
+ return size;
+ }
+@@ -536,6 +668,7 @@ static unsigned long __copy_user_intel_n
+ int d0, d1;
+
+ __asm__ __volatile__(
++ " movw %w6, %%ds\n"
+ " .align 2,0x90\n"
+ "0: movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+@@ -544,36 +677,36 @@ static unsigned long __copy_user_intel_n
+ " .align 2,0x90\n"
+ "2: movl 0(%4), %%eax\n"
+ "21: movl 4(%4), %%edx\n"
+- " movnti %%eax, 0(%3)\n"
+- " movnti %%edx, 4(%3)\n"
++ " movnti %%eax, %%es:0(%3)\n"
++ " movnti %%edx, %%es:4(%3)\n"
+ "3: movl 8(%4), %%eax\n"
+ "31: movl 12(%4),%%edx\n"
+- " movnti %%eax, 8(%3)\n"
+- " movnti %%edx, 12(%3)\n"
++ " movnti %%eax, %%es:8(%3)\n"
++ " movnti %%edx, %%es:12(%3)\n"
+ "4: movl 16(%4), %%eax\n"
+ "41: movl 20(%4), %%edx\n"
+- " movnti %%eax, 16(%3)\n"
+- " movnti %%edx, 20(%3)\n"
++ " movnti %%eax, %%es:16(%3)\n"
++ " movnti %%edx, %%es:20(%3)\n"
+ "10: movl 24(%4), %%eax\n"
+ "51: movl 28(%4), %%edx\n"
+- " movnti %%eax, 24(%3)\n"
+- " movnti %%edx, 28(%3)\n"
++ " movnti %%eax, %%es:24(%3)\n"
++ " movnti %%edx, %%es:28(%3)\n"
+ "11: movl 32(%4), %%eax\n"
+ "61: movl 36(%4), %%edx\n"
+- " movnti %%eax, 32(%3)\n"
+- " movnti %%edx, 36(%3)\n"
++ " movnti %%eax, %%es:32(%3)\n"
++ " movnti %%edx, %%es:36(%3)\n"
+ "12: movl 40(%4), %%eax\n"
+ "71: movl 44(%4), %%edx\n"
+- " movnti %%eax, 40(%3)\n"
+- " movnti %%edx, 44(%3)\n"
++ " movnti %%eax, %%es:40(%3)\n"
++ " movnti %%edx, %%es:44(%3)\n"
+ "13: movl 48(%4), %%eax\n"
+ "81: movl 52(%4), %%edx\n"
+- " movnti %%eax, 48(%3)\n"
+- " movnti %%edx, 52(%3)\n"
++ " movnti %%eax, %%es:48(%3)\n"
++ " movnti %%edx, %%es:52(%3)\n"
+ "14: movl 56(%4), %%eax\n"
+ "91: movl 60(%4), %%edx\n"
+- " movnti %%eax, 56(%3)\n"
+- " movnti %%edx, 60(%3)\n"
++ " movnti %%eax, %%es:56(%3)\n"
++ " movnti %%edx, %%es:60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+@@ -588,6 +721,8 @@ static unsigned long __copy_user_intel_n
+ " movl %%eax,%0\n"
+ "7: rep; movsb\n"
+ "8:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+ "16: jmp 8b\n"
+@@ -616,7 +751,7 @@ static unsigned long __copy_user_intel_n
+ " .long 7b,16b\n"
+ ".previous"
+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
+- : "1"(to), "2"(from), "0"(size)
++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
+ : "eax", "edx", "memory");
+ return size;
+ }
+@@ -629,90 +764,146 @@ static unsigned long __copy_user_intel_n
+ */
+ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
+ unsigned long size);
+-unsigned long __copy_user_intel(void __user *to, const void *from,
++unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
++ unsigned long size);
++unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
+ unsigned long size);
+ unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ const void __user *from, unsigned long size);
+ #endif /* CONFIG_X86_INTEL_USERCOPY */
+
+ /* Generic arbitrary sized copy. */
+-#define __copy_user(to, from, size) \
+-do { \
+- int __d0, __d1, __d2; \
+- __asm__ __volatile__( \
+- " cmp $7,%0\n" \
+- " jbe 1f\n" \
+- " movl %1,%0\n" \
+- " negl %0\n" \
+- " andl $7,%0\n" \
+- " subl %0,%3\n" \
+- "4: rep; movsb\n" \
+- " movl %3,%0\n" \
+- " shrl $2,%0\n" \
+- " andl $3,%3\n" \
+- " .align 2,0x90\n" \
+- "0: rep; movsl\n" \
+- " movl %3,%0\n" \
+- "1: rep; movsb\n" \
+- "2:\n" \
+- ".section .fixup,\"ax\"\n" \
+- "5: addl %3,%0\n" \
+- " jmp 2b\n" \
+- "3: lea 0(%3,%0,4),%0\n" \
+- " jmp 2b\n" \
+- ".previous\n" \
+- ".section __ex_table,\"a\"\n" \
+- " .align 4\n" \
+- " .long 4b,5b\n" \
+- " .long 0b,3b\n" \
+- " .long 1b,2b\n" \
+- ".previous" \
+- : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
+- : "3"(size), "0"(size), "1"(to), "2"(from) \
+- : "memory"); \
+-} while (0)
+-
+-#define __copy_user_zeroing(to, from, size) \
+-do { \
+- int __d0, __d1, __d2; \
+- __asm__ __volatile__( \
+- " cmp $7,%0\n" \
+- " jbe 1f\n" \
+- " movl %1,%0\n" \
+- " negl %0\n" \
+- " andl $7,%0\n" \
+- " subl %0,%3\n" \
+- "4: rep; movsb\n" \
+- " movl %3,%0\n" \
+- " shrl $2,%0\n" \
+- " andl $3,%3\n" \
+- " .align 2,0x90\n" \
+- "0: rep; movsl\n" \
+- " movl %3,%0\n" \
+- "1: rep; movsb\n" \
+- "2:\n" \
+- ".section .fixup,\"ax\"\n" \
+- "5: addl %3,%0\n" \
+- " jmp 6f\n" \
+- "3: lea 0(%3,%0,4),%0\n" \
+- "6: pushl %0\n" \
+- " pushl %%eax\n" \
+- " xorl %%eax,%%eax\n" \
+- " rep; stosb\n" \
+- " popl %%eax\n" \
+- " popl %0\n" \
+- " jmp 2b\n" \
+- ".previous\n" \
+- ".section __ex_table,\"a\"\n" \
+- " .align 4\n" \
+- " .long 4b,5b\n" \
+- " .long 0b,3b\n" \
+- " .long 1b,6b\n" \
+- ".previous" \
+- : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
+- : "3"(size), "0"(size), "1"(to), "2"(from) \
+- : "memory"); \
+-} while (0)
++static unsigned long
++__generic_copy_to_user(void __user *to, const void *from, unsigned long size)
++{
++ int __d0, __d1, __d2;
++
++ __asm__ __volatile__(
++ " movw %w8,%%es\n"
++ " cmp $7,%0\n"
++ " jbe 1f\n"
++ " movl %1,%0\n"
++ " negl %0\n"
++ " andl $7,%0\n"
++ " subl %0,%3\n"
++ "4: rep; movsb\n"
++ " movl %3,%0\n"
++ " shrl $2,%0\n"
++ " andl $3,%3\n"
++ " .align 2,0x90\n"
++ "0: rep; movsl\n"
++ " movl %3,%0\n"
++ "1: rep; movsb\n"
++ "2:\n"
++ " pushl %%ss\n"
++ " popl %%es\n"
++ ".section .fixup,\"ax\"\n"
++ "5: addl %3,%0\n"
++ " jmp 2b\n"
++ "3: lea 0(%3,%0,4),%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 4b,5b\n"
++ " .long 0b,3b\n"
++ " .long 1b,2b\n"
++ ".previous"
++ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
++ : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
++ : "memory");
++ return size;
++}
++
++static unsigned long
++__generic_copy_from_user(void *to, const void __user *from, unsigned long size)
++{
++ int __d0, __d1, __d2;
++
++ __asm__ __volatile__(
++ " movw %w8,%%ds\n"
++ " cmp $7,%0\n"
++ " jbe 1f\n"
++ " movl %1,%0\n"
++ " negl %0\n"
++ " andl $7,%0\n"
++ " subl %0,%3\n"
++ "4: rep; movsb\n"
++ " movl %3,%0\n"
++ " shrl $2,%0\n"
++ " andl $3,%3\n"
++ " .align 2,0x90\n"
++ "0: rep; movsl\n"
++ " movl %3,%0\n"
++ "1: rep; movsb\n"
++ "2:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
++ ".section .fixup,\"ax\"\n"
++ "5: addl %3,%0\n"
++ " jmp 2b\n"
++ "3: lea 0(%3,%0,4),%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 4b,5b\n"
++ " .long 0b,3b\n"
++ " .long 1b,2b\n"
++ ".previous"
++ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
++ : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
++ : "memory");
++ return size;
++}
++
++static unsigned long
++__copy_user_zeroing(void *to, const void __user *from, unsigned long size)
++{
++ int __d0, __d1, __d2;
++
++ __asm__ __volatile__(
++ " movw %w8,%%ds\n"
++ " cmp $7,%0\n"
++ " jbe 1f\n"
++ " movl %1,%0\n"
++ " negl %0\n"
++ " andl $7,%0\n"
++ " subl %0,%3\n"
++ "4: rep; movsb\n"
++ " movl %3,%0\n"
++ " shrl $2,%0\n"
++ " andl $3,%3\n"
++ " .align 2,0x90\n"
++ "0: rep; movsl\n"
++ " movl %3,%0\n"
++ "1: rep; movsb\n"
++ "2:\n"
++ " pushl %%ss\n"
++ " popl %%ds\n"
++ ".section .fixup,\"ax\"\n"
++ "5: addl %3,%0\n"
++ " jmp 6f\n"
++ "3: lea 0(%3,%0,4),%0\n"
++ "6: pushl %0\n"
++ " pushl %%eax\n"
++ " xorl %%eax,%%eax\n"
++ " rep; stosb\n"
++ " popl %%eax\n"
++ " popl %0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 4b,5b\n"
++ " .long 0b,3b\n"
++ " .long 1b,6b\n"
++ ".previous"
++ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
++ : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
++ : "memory");
++ return size;
++}
+
+ unsigned long __copy_to_user_ll(void __user *to, const void *from,
+ unsigned long n)
+@@ -775,9 +966,9 @@ survive:
+ }
+ #endif
+ if (movsl_is_ok(to, from, n))
+- __copy_user(to, from, n);
++ n = __generic_copy_to_user(to, from, n);
+ else
+- n = __copy_user_intel(to, from, n);
++ n = __generic_copy_to_user_intel(to, from, n);
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_to_user_ll);
+@@ -786,7 +977,7 @@ unsigned long __copy_from_user_ll(void *
+ unsigned long n)
+ {
+ if (movsl_is_ok(to, from, n))
+- __copy_user_zeroing(to, from, n);
++ n = __copy_user_zeroing(to, from, n);
+ else
+ n = __copy_user_zeroing_intel(to, from, n);
+ return n;
+@@ -797,10 +988,9 @@ unsigned long __copy_from_user_ll_nozero
+ unsigned long n)
+ {
+ if (movsl_is_ok(to, from, n))
+- __copy_user(to, from, n);
++ n = __generic_copy_from_user(to, from, n);
+ else
+- n = __copy_user_intel((void __user *)to,
+- (const void *)from, n);
++ n = __generic_copy_from_user_intel(to, from, n);
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
+@@ -812,9 +1002,9 @@ unsigned long __copy_from_user_ll_nocach
+ if (n > 64 && cpu_has_xmm2)
+ n = __copy_user_zeroing_intel_nocache(to, from, n);
+ else
+- __copy_user_zeroing(to, from, n);
++ n = __copy_user_zeroing(to, from, n);
+ #else
+- __copy_user_zeroing(to, from, n);
++ n = __copy_user_zeroing(to, from, n);
+ #endif
+ return n;
+ }
+@@ -827,59 +1017,37 @@ unsigned long __copy_from_user_ll_nocach
+ if (n > 64 && cpu_has_xmm2)
+ n = __copy_user_intel_nocache(to, from, n);
+ else
+- __copy_user(to, from, n);
++ n = __generic_copy_from_user(to, from, n);
+ #else
+- __copy_user(to, from, n);
++ n = __generic_copy_from_user(to, from, n);
+ #endif
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
+
+-/**
+- * copy_to_user: - Copy a block of data into user space.
+- * @to: Destination address, in user space.
+- * @from: Source address, in kernel space.
+- * @n: Number of bytes to copy.
+- *
+- * Context: User context only. This function may sleep.
+- *
+- * Copy data from kernel space to user space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- */
+-unsigned long
+-copy_to_user(void __user *to, const void *from, unsigned long n)
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++void __set_fs(mm_segment_t x, int cpu)
+ {
+- if (access_ok(VERIFY_WRITE, to, n))
+- n = __copy_to_user(to, from, n);
+- return n;
++ unsigned long limit = x.seg;
++ struct desc_struct d;
++
++ current_thread_info()->addr_limit = x;
++ if (likely(limit))
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++ pack_descriptor(&d, 0UL, limit, 0xF3, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_DS, &d, DESCTYPE_S);
+ }
+-EXPORT_SYMBOL(copy_to_user);
+
+-/**
+- * copy_from_user: - Copy a block of data from user space.
+- * @to: Destination address, in kernel space.
+- * @from: Source address, in user space.
+- * @n: Number of bytes to copy.
+- *
+- * Context: User context only. This function may sleep.
+- *
+- * Copy data from user space to kernel space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- *
+- * If some data could not be copied, this function will pad the copied
+- * data to the requested size using zero bytes.
+- */
+-unsigned long
+-copy_from_user(void *to, const void __user *from, unsigned long n)
++void set_fs(mm_segment_t x)
+ {
+- if (access_ok(VERIFY_READ, from, n))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
++ __set_fs(x, get_cpu());
++ put_cpu_no_resched();
+ }
+-EXPORT_SYMBOL(copy_from_user);
++#else
++void set_fs(mm_segment_t x)
++{
++ current_thread_info()->addr_limit = x;
++}
++#endif
++
++EXPORT_SYMBOL(set_fs);
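
The tail of the usercopy_32.c hunk is where UDEREF's segment limits come from: set_fs() no longer just updates addr_limit, it repacks the GDT descriptor for __USER_DS so the segment limit itself follows the KERNEL_DS/USER_DS switch, and every %ds/%es load of __USER_DS elsewhere in the patch inherits that bound (the 0xF3/0xC arguments to pack_descriptor appear to encode a present, DPL-3, read/write data segment with 4K granularity and 32-bit default size). With 4K granularity the descriptor's limit field holds the index of the last valid page, hence "(limit - 1UL) >> PAGE_SHIFT". A small worked example under that reading:

    /* Hedged sketch: page-granular segment limit for an addr_limit.
     * PAGE_SHIFT is 12 on x86-32. */
    static unsigned long seg_limit_pages(unsigned long addr_limit)
    {
            return addr_limit ? (addr_limit - 1UL) >> 12 : 0UL;
    }

    /* addr_limit = 0xC0000000 (a 3G TASK_SIZE) gives 0xBFFFF, so the
     * segment spans 0 .. 0xBFFFFFFF and any kernel-space dereference
     * through %ds/%es faults instead of reaching kernel memory. */
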
+diff -urNp linux-2.6.30.8/arch/x86/Makefile linux-2.6.30.8/arch/x86/Makefile
+--- linux-2.6.30.8/arch/x86/Makefile 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/Makefile 2009-07-30 09:48:09.917626356 -0400
+@@ -198,3 +198,12 @@ define archhelp
+ echo ' FDARGS="..." arguments for the booted kernel'
+ echo ' FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
++
++archprepare:
++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+diff -urNp linux-2.6.30.8/arch/x86/mm/extable.c linux-2.6.30.8/arch/x86/mm/extable.c
+--- linux-2.6.30.8/arch/x86/mm/extable.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/extable.c 2009-07-30 09:48:09.970452952 -0400
+@@ -1,14 +1,62 @@
+ #include <linux/module.h>
+ #include <linux/spinlock.h>
++#include <linux/sort.h>
+ #include <asm/uaccess.h>
+
++/*
++ * The exception table needs to be sorted so that the binary
++ * search that we use to find entries in it works properly.
++ * This is used both for the kernel exception table and for
++ * the exception tables of modules that get loaded.
++ */
++static int cmp_ex(const void *a, const void *b)
++{
++ const struct exception_table_entry *x = a, *y = b;
++
++ /* avoid overflow */
++ if (x->insn > y->insn)
++ return 1;
++ if (x->insn < y->insn)
++ return -1;
++ return 0;
++}
++
++static void swap_ex(void *a, void *b, int size)
++{
++ struct exception_table_entry t, *x = a, *y = b;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
++ t = *x;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
++ *x = *y;
++ *y = t;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
++}
++
++void sort_extable(struct exception_table_entry *start,
++ struct exception_table_entry *finish)
++{
++ sort(start, finish - start, sizeof(struct exception_table_entry),
++ cmp_ex, swap_ex);
++}
+
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fixup;
+
+ #ifdef CONFIG_PNPBIOS
+- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
++ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
+ extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
+ extern u32 pnp_bios_is_utter_crap;
+ pnp_bios_is_utter_crap = 1;
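
The extable.c hunk above gives x86 its own sort_extable() built on the kernel's generic sort(): exception fixups are found by binary search on the faulting instruction address, so the tables must be ordered by insn once assembled. cmp_ex compares with explicit branches rather than returning x->insn - y->insn, which could overflow, and swap_ex is wrapped in pax_open_kernel/pax_close_kernel because under KERNEXEC the table being sorted may itself be write-protected. The fixup_exception() change additionally guards the PNP BIOS test with !v8086_mode(regs), since vm86 code can carry arbitrary %cs values that would otherwise match. A hedged sketch of the lookup this ordering enables (cf. the generic search_extable(); names approximate):

    struct ex_entry_sketch { unsigned long insn, fixup; };

    static const struct ex_entry_sketch *
    search_extable_sketch(const struct ex_entry_sketch *first,
                          const struct ex_entry_sketch *last,
                          unsigned long addr)
    {
            while (first <= last) {
                    const struct ex_entry_sketch *mid =
                            first + (last - first) / 2;
                    if (mid->insn < addr)
                            first = mid + 1;
                    else if (mid->insn > addr)
                            last = mid - 1;
                    else
                            return mid;  /* resume at mid->fixup */
            }
            return 0;  /* unhandled: the fault is fatal */
    }
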
+diff -urNp linux-2.6.30.8/arch/x86/mm/fault.c linux-2.6.30.8/arch/x86/mm/fault.c
+--- linux-2.6.30.8/arch/x86/mm/fault.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/fault.c 2009-08-05 19:15:53.629625442 -0400
+@@ -27,6 +27,8 @@
+ #include <linux/tty.h>
+ #include <linux/smp.h>
+ #include <linux/mm.h>
++#include <linux/unistd.h>
++#include <linux/compiler.h>
+
+ #include <asm-generic/sections.h>
+
+@@ -37,6 +39,7 @@
+ #include <asm/proto.h>
+ #include <asm/traps.h>
+ #include <asm/desc.h>
++#include <asm/vsyscall.h>
+
+ /*
+ * Page fault error code bits:
+@@ -73,7 +76,7 @@ static inline int notify_page_fault(stru
+ int ret = 0;
+
+ /* kprobe_running() needs smp_processor_id() */
+- if (kprobes_built_in() && !user_mode_vm(regs)) {
++ if (kprobes_built_in() && !user_mode(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, 14))
+ ret = 1;
+@@ -193,6 +196,30 @@ force_sig_info_fault(int si_signo, int s
+ force_sig_info(si_signo, &info, tsk);
+ }
+
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault(struct pt_regs *regs);
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset(mm, address);
++ if (!pgd_present(*pgd))
++ return NULL;
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return NULL;
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return NULL;
++ return pmd;
++}
++#endif
++
+ DEFINE_SPINLOCK(pgd_lock);
+ LIST_HEAD(pgd_list);
+
+@@ -571,7 +598,7 @@ static int is_errata93(struct pt_regs *r
+ static int is_errata100(struct pt_regs *regs, unsigned long address)
+ {
+ #ifdef CONFIG_X86_64
+- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
+ return 1;
+ #endif
+ return 0;
+@@ -598,7 +625,7 @@ static int is_f00f_bug(struct pt_regs *r
+ }
+
+ static const char nx_warning[] = KERN_CRIT
+-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
++"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
+
+ static void
+ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -607,15 +634,31 @@ show_fault_oops(struct pt_regs *regs, un
+ if (!oops_may_print())
+ return;
+
+- if (error_code & PF_INSTR) {
++ if (nx_enabled && (error_code & PF_INSTR)) {
+ unsigned int level;
+
+ pte_t *pte = lookup_address(address, &level);
+
+ if (pte && pte_present(*pte) && !pte_exec(*pte))
+- printk(nx_warning, current_uid());
++ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_MODULES
++ if (init_mm.start_code <= address && address < (unsigned long)MODULES_END)
++#else
++ if (init_mm.start_code <= address && address < init_mm.end_code)
++#endif
++ {
++ if (current->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %u.%u.%u.%u: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
++ NIPQUAD(current->signal->curr_ip), current->comm, task_pid_nr(current), current_uid(), current_euid());
++ else
++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
++ current->comm, task_pid_nr(current), current_uid(), current_euid());
++ }
++#endif
++
+ printk(KERN_ALERT "BUG: unable to handle kernel ");
+ if (address < PAGE_SIZE)
+ printk(KERN_CONT "NULL pointer dereference");
+@@ -740,6 +783,68 @@ __bad_area_nosemaphore(struct pt_regs *r
+ unsigned long address, int si_code)
+ {
+ struct task_struct *tsk = current;
++ struct mm_struct *mm = tsk->mm;
++
++#ifdef CONFIG_X86_64
++ if (mm && (error_code & PF_INSTR)) {
++ if (regs->ip == (unsigned long)vgettimeofday) {
++ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
++ return;
++ } else if (regs->ip == (unsigned long)vtime) {
++ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
++ return;
++ } else if (regs->ip == (unsigned long)vgetcpu) {
++ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
++ return;
++ }
++ }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (mm && (error_code & PF_USER)) {
++ unsigned long ip = regs->ip;
++
++ if (v8086_mode(regs))
++ ip = ((regs->cs & 0xffff) << 4) + (regs->ip & 0xffff);
++
++ /*
++ * It's possible to have interrupts off here:
++ */
++ local_irq_enable();
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
++ ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && regs->ip == address))) {
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (regs->ip + SEGMEXEC_TASK_SIZE == address)) {
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
++ }
++#endif
+
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & PF_USER) {
+@@ -874,6 +979,106 @@ static int spurious_fault_check(unsigned
+ return 1;
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
++{
++ pte_t *pte;
++ pmd_t *pmd;
++ spinlock_t *ptl;
++ unsigned char pte_mask;
++
++ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
++ !(mm->pax_flags & MF_PAX_PAGEEXEC))
++ return 0;
++
++ /* PaX: it's our fault, let's handle it if we can */
++
++ /* PaX: take a look at read faults before acquiring any locks */
++ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
++ /* instruction fetch attempt from a protected page in user mode */
++ up_read(&mm->mmap_sem);
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return 1;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++
++ pmd = pax_get_pmd(mm, address);
++ if (unlikely(!pmd))
++ return 0;
++
++ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
++ pte_unmap_unlock(pte, ptl);
++ return 0;
++ }
++
++ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
++ /* write attempt to a protected page in user mode */
++ pte_unmap_unlock(pte, ptl);
++ return 0;
++ }
++
++#ifdef CONFIG_SMP
++ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
++#else
++ if (likely(address > get_limit(regs->cs)))
++#endif
++ {
++ set_pte(pte, pte_mkread(*pte));
++ __flush_tlb_one(address);
++ pte_unmap_unlock(pte, ptl);
++ up_read(&mm->mmap_sem);
++ return 1;
++ }
++
++ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
++
++ /*
++ * PaX: fill DTLB with user rights and retry
++ */
++ __asm__ __volatile__ (
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ "movw %w4,%%es\n"
++#endif
++ "orb %2,(%1)\n"
++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
++/*
++ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
++ * (and AMD's) TLBs. Namely, they do not cache PTEs that would raise *any*
++ * page fault when examined during a TLB load attempt. This is true not only
++ * for PTEs holding a non-present entry but also for present entries that will
++ * raise a page fault (such as those set up by PaX, or the copy-on-write
++ * mechanism). In effect it means that we do *not* need to flush the TLBs
++ * for our target pages since their PTEs are simply not in the TLBs at all.
++
++ * The best thing about omitting it is that we gain around 15-20% speed in the
++ * fast path of the page fault handler and can get rid of tracing since we
++ * can no longer flush unintended entries.
++ */
++ "invlpg (%0)\n"
++#endif
++ "testb $0,%%es:(%0)\n"
++ "xorb %3,(%1)\n"
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ "pushl %%ss\n"
++ "popl %%es\n"
++#endif
++ :
++ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER), "r" (__USER_DS)
++ : "memory", "cc");
++ pte_unmap_unlock(pte, ptl);
++ up_read(&mm->mmap_sem);
++ return 1;
++}
++#endif
++
+ /*
+ * Handle a spurious fault caused by a stale TLB entry.
+ *
+@@ -940,6 +1145,9 @@ int show_unhandled_signals = 1;
+ static inline int
+ access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
+ {
++ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
++ return 1;
++
+ if (write) {
+ /* write, present and write, not present: */
+ if (unlikely(!(vma->vm_flags & VM_WRITE)))
+@@ -973,19 +1181,18 @@ do_page_fault(struct pt_regs *regs, unsi
+ {
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+- unsigned long address;
+ struct mm_struct *mm;
+ int write;
+ int fault;
+
++ /* Get the faulting address: */
++ const unsigned long address = read_cr2();
++
+ tsk = current;
+ mm = tsk->mm;
+
+ prefetchw(&mm->mmap_sem);
+
+- /* Get the faulting address: */
+- address = read_cr2();
+-
+ if (unlikely(kmmio_fault(regs, address)))
+ return;
+
+@@ -1033,7 +1240,7 @@ do_page_fault(struct pt_regs *regs, unsi
+ * User-mode registers count as a user access even for any
+ * potential system fault or CPU buglet:
+ */
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ local_irq_enable();
+ error_code |= PF_USER;
+ } else {
+@@ -1085,6 +1292,11 @@ do_page_fault(struct pt_regs *regs, unsi
+ might_sleep();
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
++ return;
++#endif
++
+ vma = find_vma(mm, address);
+ if (unlikely(!vma)) {
+ bad_area(regs, error_code, address);
+@@ -1096,18 +1308,24 @@ do_page_fault(struct pt_regs *regs, unsi
+ bad_area(regs, error_code, address);
+ return;
+ }
+- if (error_code & PF_USER) {
+- /*
+- * Accessing the stack below %sp is always a bug.
+- * The large cushion allows instructions like enter
+- * and pusha to work. ("enter $65535, $31" pushes
+- * 32 pointers and then decrements %sp by 65535.)
+- */
+- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
+- bad_area(regs, error_code, address);
+- return;
+- }
++ /*
++ * Accessing the stack below %sp is always a bug.
++ * The large cushion allows instructions like enter
++ * and pusha to work. ("enter $65535, $31" pushes
++ * 32 pointers and then decrements %sp by 65535.)
++ */
++ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
++ bad_area(regs, error_code, address);
++ return;
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
++ bad_area(regs, error_code, address);
++ return;
++ }
++#endif
++
+ if (unlikely(expand_stack(vma, address))) {
+ bad_area(regs, error_code, address);
+ return;
+@@ -1146,3 +1364,174 @@ good_area:
+
+ up_read(&mm->mmap_sem);
+ }
++
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault_32(struct pt_regs *regs)
++{
++ int err;
++
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned char mov1, mov2;
++ unsigned short jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 11) >> 32)
++ break;
++#endif
++
++ err = get_user(mov1, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
++ regs->cx = addr1;
++ regs->ax = addr2;
++ regs->ip = addr2;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned char mov, jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 9) >> 32)
++ break;
++#endif
++
++ err = get_user(mov, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++ if (err)
++ break;
++
++ if (mov == 0xB9 && jmp == 0xE9) {
++ regs->cx = addr1;
++ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++ return 2;
++ }
++ } while (0);
++
++ return 1; /* PaX in action */
++}
++
++#ifdef CONFIG_X86_64
++static int pax_handle_fetch_fault_64(struct pt_regs *regs)
++{
++ int err;
++
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char jmp2;
++ unsigned int addr1;
++ unsigned long addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char jmp2;
++ unsigned long addr1, addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
++ return 1; /* PaX in action */
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->ip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when gcc trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ if (v8086_mode(regs))
++ return 1;
++
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++
++#ifdef CONFIG_X86_32
++ return pax_handle_fetch_fault_32(regs);
++#else
++ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
++ return pax_handle_fetch_fault_32(regs);
++ else
++ return pax_handle_fetch_fault_64(regs);
++#endif
++}
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_insns(void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char __user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
++ for (i = -1; i < 80 / sizeof(long); i++) {
++ unsigned long c;
++ if (get_user(c, (unsigned long __user *)sp+i))
++#ifdef CONFIG_X86_32
++ printk(KERN_CONT "???????? ");
++#else
++ printk(KERN_CONT "???????????????? ");
++#endif
++ else
++ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
++ }
++ printk("\n");
++}
++#endif
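
The large fault.c addition above implements, among other things, EMUTRAMP: instead of letting a non-executable stack kill a process that uses gcc nested-function trampolines, pax_handle_fetch_fault() pattern-matches the byte sequences gcc emits (0xB9 = movl $imm32,%ecx, 0xB8 = movl $imm32,%eax, 0xE0FF read little-endian = "ff e0" = jmp *%eax, 0xE9 = jmp rel32) and performs the equivalent register writes itself. For the rel32 variant the displacement is relative to the end of the 5-byte jmp that starts at ip+5, which is where "regs->ip + addr2 + 10" in emulation #2 comes from:

    /* Worked detail of 32-bit emulation #2: the movl occupies ip..ip+4,
     * the E9 jmp occupies ip+5..ip+9, and rel32 counts from ip+10. */
    static unsigned int tramp2_target_sketch(unsigned int ip, unsigned int rel32)
    {
            return ip + 10 + rel32;
    }
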
+diff -urNp linux-2.6.30.8/arch/x86/mm/highmem_32.c linux-2.6.30.8/arch/x86/mm/highmem_32.c
+--- linux-2.6.30.8/arch/x86/mm/highmem_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/highmem_32.c 2009-07-30 09:48:09.970452952 -0400
+@@ -32,6 +32,10 @@ void *kmap_atomic_prot(struct page *page
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+ pagefault_disable();
+
+@@ -43,7 +47,17 @@ void *kmap_atomic_prot(struct page *page
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+@@ -59,15 +73,29 @@ void kunmap_atomic(void *kvaddr, enum km
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ /*
+ * Force other mappings to Oops if they'll try to access this pte
+	 * without first remapping it. Keeping stale mappings around is a bad idea
+ * also, in case the page changes cacheability attributes or becomes
+ * a protected page in a hypervisor.
+ */
+- if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
++ if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ kpte_clear_flush(kmap_pte-idx, vaddr);
+- else {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
++ } else {
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(vaddr < PAGE_OFFSET);
+ BUG_ON(vaddr >= (unsigned long)high_memory);
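
The cr0 bracketing added to kmap_atomic_prot() and kunmap_atomic() is the KERNEXEC idiom that recurs throughout this patch: page tables are kept write-protected, so a legitimate PTE update must briefly lift protection and restore it immediately afterwards. A rough user-space analogue, substituting mprotect() for the CR0.WP toggle; this is purely illustrative, not the kernel mechanism:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
    	long page = sysconf(_SC_PAGESIZE);
    	unsigned char *p = mmap(NULL, page, PROT_READ,
    				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    	if (p == MAP_FAILED)
    		return 1;

    	mprotect(p, page, PROT_READ | PROT_WRITE);  /* "pax_open_kernel"  */
    	memcpy(p, "patched", 8);                    /* the protected write */
    	mprotect(p, page, PROT_READ);               /* "pax_close_kernel" */

    	printf("%s\n", p);
    	return 0;
    }

The window is kept as narrow as possible: one set_pte() or kpte_clear_flush() between open and close.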
+diff -urNp linux-2.6.30.8/arch/x86/mm/hugetlbpage.c linux-2.6.30.8/arch/x86/mm/hugetlbpage.c
+--- linux-2.6.30.8/arch/x86/mm/hugetlbpage.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/hugetlbpage.c 2009-07-30 09:48:09.971412512 -0400
+@@ -267,13 +267,18 @@ static unsigned long hugetlb_get_unmappe
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+- unsigned long start_addr;
++ unsigned long start_addr, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
+
+ if (len > mm->cached_hole_size) {
+- start_addr = mm->free_area_cache;
++ start_addr = mm->free_area_cache;
+ } else {
+- start_addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -281,13 +286,13 @@ full_search:
+
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+- if (TASK_SIZE - len < addr) {
++ if (pax_task_size - len < addr) {
+ /*
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+@@ -310,9 +315,8 @@ static unsigned long hugetlb_get_unmappe
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma, *prev_vma;
+- unsigned long base = mm->mmap_base, addr = addr0;
++ unsigned long base = mm->mmap_base, addr;
+ unsigned long largest_hole = mm->cached_hole_size;
+- int first_time = 1;
+
+ /* don't allow allocations above current base */
+ if (mm->free_area_cache > base)
+@@ -322,7 +326,7 @@ static unsigned long hugetlb_get_unmappe
+ largest_hole = 0;
+ mm->free_area_cache = base;
+ }
+-try_again:
++
+ /* make sure it can fit in the remaining address space */
+ if (mm->free_area_cache < len)
+ goto fail;
+@@ -364,22 +368,26 @@ try_again:
+
+ fail:
+ /*
+- * if hint left us with no space for the requested
+- * mapping then try again:
+- */
+- if (first_time) {
+- mm->free_area_cache = base;
+- largest_hole = 0;
+- first_time = 0;
+- goto try_again;
+- }
+- /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+ addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
+ len, pgoff, flags);
+@@ -387,6 +395,7 @@ fail:
+ /*
+ * Restore the topdown base:
+ */
++ mm->mmap_base = base;
+ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+@@ -400,10 +409,17 @@ hugetlb_get_unmapped_area(struct file *f
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+- if (len > TASK_SIZE)
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED) {
+@@ -415,7 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
+ if (addr) {
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
++ if (pax_task_size - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
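
The recurring substitution in hugetlbpage.c (and in mmap.c further down) replaces every hard-coded TASK_SIZE bound with a per-mm value, because SEGMEXEC halves the user address space and mirrors the lower half into the upper one as an execute-only segment. A hedged sketch of the helper these hunks effectively open-code; the constant and the mm fields are assumptions drawn from the hunk, not verified definitions from this tree:

    #define TASK_SIZE_I386		0xC0000000UL		/* assumed i386 layout */
    #define SEGMEXEC_TASK_SIZE	(TASK_SIZE_I386 / 2)	/* lower (data) half   */

    static unsigned long pax_task_size_of(const struct mm_struct *mm)
    {
    	return (mm->pax_flags & MF_PAX_SEGMEXEC)
    		? SEGMEXEC_TASK_SIZE : TASK_SIZE_I386;
    }

The same reasoning explains the switch from TASK_UNMAPPED_BASE to mm->mmap_base: under SEGMEXEC and RANDMMAP the search origin is per-mm, not a compile-time constant.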
+diff -urNp linux-2.6.30.8/arch/x86/mm/init_32.c linux-2.6.30.8/arch/x86/mm/init_32.c
+--- linux-2.6.30.8/arch/x86/mm/init_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/init_32.c 2009-07-30 09:48:09.972413251 -0400
+@@ -50,6 +50,7 @@
+ #include <asm/setup.h>
+ #include <asm/cacheflush.h>
+ #include <asm/init.h>
++#include <asm/desc.h>
+
+ unsigned long max_low_pfn_mapped;
+ unsigned long max_pfn_mapped;
+@@ -75,36 +76,6 @@ static __init void *alloc_low_page(void)
+ }
+
+ /*
+- * Creates a middle page table and puts a pointer to it in the
+- * given global directory entry. This only returns the gd entry
+- * in non-PAE compilation mode, since the middle layer is folded.
+- */
+-static pmd_t * __init one_md_table_init(pgd_t *pgd)
+-{
+- pud_t *pud;
+- pmd_t *pmd_table;
+-
+-#ifdef CONFIG_X86_PAE
+- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
+- if (after_bootmem)
+- pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+- else
+- pmd_table = (pmd_t *)alloc_low_page();
+- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
+- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+- pud = pud_offset(pgd, 0);
+- BUG_ON(pmd_table != pmd_offset(pud, 0));
+-
+- return pmd_table;
+- }
+-#endif
+- pud = pud_offset(pgd, 0);
+- pmd_table = pmd_offset(pud, 0);
+-
+- return pmd_table;
+-}
+-
+-/*
+ * Create a page table and place a pointer to it in a middle page
+ * directory entry:
+ */
+@@ -124,13 +95,28 @@ static pte_t * __init one_page_table_ini
+ page_table = (pte_t *)alloc_low_page();
+
+ paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
++#else
+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++#endif
+ BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+ }
+
+ return pte_offset_kernel(pmd, 0);
+ }
+
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++ pud_t *pud;
++ pmd_t *pmd_table;
++
++ pud = pud_offset(pgd, 0);
++ pmd_table = pmd_offset(pud, 0);
++
++ return pmd_table;
++}
++
+ pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+ {
+ int pgd_idx = pgd_index(vaddr);
+@@ -204,6 +190,7 @@ page_table_range_init(unsigned long star
+ int pgd_idx, pmd_idx;
+ unsigned long vaddr;
+ pgd_t *pgd;
++ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte = NULL;
+
+@@ -213,8 +200,13 @@ page_table_range_init(unsigned long star
+ pgd = pgd_base + pgd_idx;
+
+ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
+- pmd = one_md_table_init(pgd);
+- pmd = pmd + pmd_index(vaddr);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++
++#ifdef CONFIG_X86_PAE
++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
++
+ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+ pmd++, pmd_idx++) {
+ pte = page_table_kmap_check(one_page_table_init(pmd),
+@@ -226,11 +218,23 @@ page_table_range_init(unsigned long star
+ }
+ }
+
+-static inline int is_kernel_text(unsigned long addr)
++static inline int is_kernel_text(unsigned long start, unsigned long end)
+ {
+- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
+- return 1;
+- return 0;
++ unsigned long etext;
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ etext = ktva_ktla((unsigned long)&MODULES_END);
++#else
++ etext = (unsigned long)&_etext;
++#endif
++
++ if ((start > ktla_ktva(etext) ||
++ end <= ktla_ktva((unsigned long)_stext)) &&
++ (start > ktla_ktva((unsigned long)_einittext) ||
++ end <= ktla_ktva((unsigned long)_sinittext)) &&
++ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
++ return 0;
++ return 1;
+ }
+
+ /*
+@@ -246,9 +250,10 @@ kernel_physical_mapping_init(unsigned lo
+ int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
+ unsigned long start_pfn, end_pfn;
+ pgd_t *pgd_base = swapper_pg_dir;
+- int pgd_idx, pmd_idx, pte_ofs;
++ unsigned int pgd_idx, pmd_idx, pte_ofs;
+ unsigned long pfn;
+ pgd_t *pgd;
++ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned pages_2m, pages_4k;
+@@ -281,8 +286,13 @@ repeat:
+ pfn = start_pfn;
+ pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+ pgd = pgd_base + pgd_idx;
+- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+- pmd = one_md_table_init(pgd);
++ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
++ pud = pud_offset(pgd, 0);
++ pmd = pmd_offset(pud, 0);
++
++#ifdef CONFIG_X86_PAE
++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
+
+ if (pfn >= end_pfn)
+ continue;
+@@ -294,14 +304,13 @@ repeat:
+ #endif
+ for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
+ pmd++, pmd_idx++) {
+- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
++ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
+
+ /*
+ * Map with big pages if possible, otherwise
+ * create normal page tables:
+ */
+ if (use_pse) {
+- unsigned int addr2;
+ pgprot_t prot = PAGE_KERNEL_LARGE;
+ /*
+ * first pass will use the same initial
+@@ -311,11 +320,7 @@ repeat:
+ __pgprot(PTE_IDENT_ATTR |
+ _PAGE_PSE);
+
+- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
+- PAGE_OFFSET + PAGE_SIZE-1;
+-
+- if (is_kernel_text(addr) ||
+- is_kernel_text(addr2))
++ if (is_kernel_text(address, address + PMD_SIZE))
+ prot = PAGE_KERNEL_LARGE_EXEC;
+
+ pages_2m++;
+@@ -332,7 +337,7 @@ repeat:
+ pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+ pte += pte_ofs;
+ for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
+- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
++ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
+ pgprot_t prot = PAGE_KERNEL;
+ /*
+ * first pass will use the same initial
+@@ -340,7 +345,7 @@ repeat:
+ */
+ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
+
+- if (is_kernel_text(addr))
++ if (is_kernel_text(address, address + PAGE_SIZE))
+ prot = PAGE_KERNEL_EXEC;
+
+ pages_4k++;
+@@ -492,7 +497,7 @@ void __init native_pagetable_setup_start
+
+ pud = pud_offset(pgd, va);
+ pmd = pmd_offset(pud, va);
+- if (!pmd_present(*pmd))
++ if (!pmd_present(*pmd) || pmd_huge(*pmd))
+ break;
+
+ pte = pte_offset_kernel(pmd, va);
+@@ -544,9 +549,7 @@ void __init early_ioremap_page_table_ran
+
+ static void __init pagetable_init(void)
+ {
+- pgd_t *pgd_base = swapper_pg_dir;
+-
+- permanent_kmaps_init(pgd_base);
++ permanent_kmaps_init(swapper_pg_dir);
+ }
+
+ #ifdef CONFIG_ACPI_SLEEP
+@@ -554,12 +557,12 @@ static void __init pagetable_init(void)
+ * ACPI suspend needs this for resume, because things like the intel-agp
+ * driver might have split up a kernel 4MB mapping.
+ */
+-char swsusp_pg_dir[PAGE_SIZE]
++pgd_t swsusp_pg_dir[PTRS_PER_PGD]
+ __attribute__ ((aligned(PAGE_SIZE)));
+
+ static inline void save_pg_dir(void)
+ {
+- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
++ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
+ }
+ #else /* !CONFIG_ACPI_SLEEP */
+ static inline void save_pg_dir(void)
+@@ -589,13 +592,11 @@ void zap_low_mappings(void)
+
+ int nx_enabled;
+
+-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+ #ifdef CONFIG_X86_PAE
+
+-static int disable_nx __initdata;
+-
+ /*
+ * noexec = on|off
+ *
+@@ -604,40 +605,33 @@ static int disable_nx __initdata;
+ * on Enable
+ * off Disable
+ */
++#if !defined(CONFIG_PAX_PAGEEXEC)
+ static int __init noexec_setup(char *str)
+ {
+ if (!str || !strcmp(str, "on")) {
+- if (cpu_has_nx) {
+- __supported_pte_mask |= _PAGE_NX;
+- disable_nx = 0;
+- }
++ if (cpu_has_nx)
++ nx_enabled = 1;
+ } else {
+- if (!strcmp(str, "off")) {
+- disable_nx = 1;
+- __supported_pte_mask &= ~_PAGE_NX;
+- } else {
++ if (!strcmp(str, "off"))
++ nx_enabled = 0;
++ else
+ return -EINVAL;
+- }
+ }
+
+ return 0;
+ }
+ early_param("noexec", noexec_setup);
++#endif
+
+ void __init set_nx(void)
+ {
+- unsigned int v[4], l, h;
++ if (!nx_enabled && cpu_has_nx) {
++ unsigned l, h;
+
+- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
+- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
+-
+- if ((v[3] & (1 << 20)) && !disable_nx) {
+- rdmsr(MSR_EFER, l, h);
+- l |= EFER_NX;
+- wrmsr(MSR_EFER, l, h);
+- nx_enabled = 1;
+- __supported_pte_mask |= _PAGE_NX;
+- }
++ __supported_pte_mask &= ~_PAGE_NX;
++ rdmsr(MSR_EFER, l, h);
++ l &= ~EFER_NX;
++ wrmsr(MSR_EFER, l, h);
+ }
+ }
+ #endif
+@@ -934,7 +928,7 @@ void __init mem_init(void)
+ set_highmem_pages_init();
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ datasize = (unsigned long) &_edata - (unsigned long) &_data;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+@@ -980,10 +974,10 @@ void __init mem_init(void)
+ ((unsigned long)&__init_end -
+ (unsigned long)&__init_begin) >> 10,
+
+- (unsigned long)&_etext, (unsigned long)&_edata,
+- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
++ (unsigned long)&_data, (unsigned long)&_edata,
++ ((unsigned long)&_edata - (unsigned long)&_data) >> 10,
+
+- (unsigned long)&_text, (unsigned long)&_etext,
++ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
+ ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+ /*
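
Within init_32.c above, the rewritten is_kernel_text() switches from two point probes to a range test: a mapping counts as kernel text iff [start, end) overlaps the text section, the init-text section, or the low BIOS window. The core of that test is plain interval overlap, shown runnable here with made-up addresses:

    #include <stdio.h>

    struct range { unsigned long lo, hi; };	/* half-open: [lo, hi) */

    static int overlaps(struct range a, struct range b)
    {
    	return a.lo < b.hi && b.lo < a.hi;
    }

    int main(void)
    {
    	struct range text  = { 0xc0100000UL, 0xc0400000UL }; /* assumed _stext.._etext */
    	struct range probe = { 0xc03ff000UL, 0xc0401000UL }; /* straddles end of text  */

    	printf("%d\n", overlaps(probe, text));	/* prints 1 */
    	return 0;
    }

Testing the whole PMD_SIZE or PAGE_SIZE range at once is what lets the caller drop the old two-point addr/addr2 probing.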
+diff -urNp linux-2.6.30.8/arch/x86/mm/init_64.c linux-2.6.30.8/arch/x86/mm/init_64.c
+--- linux-2.6.30.8/arch/x86/mm/init_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/init_64.c 2009-07-30 09:48:09.972413251 -0400
+@@ -202,12 +202,24 @@ void set_pte_vaddr_pud(pud_t *pud_page,
+ pmd_t *pmd;
+ pte_t *pte;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ pud = pud_page + pud_index(vaddr);
+ pmd = fill_pmd(pud, vaddr);
+ pte = fill_pte(pmd, vaddr);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ set_pte(pte, new_pte);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+@@ -265,14 +277,12 @@ static void __init __init_extra_mapping(
+ pgd = pgd_offset_k((unsigned long)__va(phys));
+ if (pgd_none(*pgd)) {
+ pud = (pud_t *) spp_getpage();
+- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
+- _PAGE_USER));
++ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
+ }
+ pud = pud_offset(pgd, (unsigned long)__va(phys));
+ if (pud_none(*pud)) {
+ pmd = (pmd_t *) spp_getpage();
+- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
+- _PAGE_USER));
++ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
+ }
+ pmd = pmd_offset(pud, phys);
+ BUG_ON(!pmd_none(*pmd));
+@@ -882,8 +892,8 @@ int kern_addr_valid(unsigned long addr)
+ static struct vm_area_struct gate_vma = {
+ .vm_start = VSYSCALL_START,
+ .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
+- .vm_page_prot = PAGE_READONLY_EXEC,
+- .vm_flags = VM_READ | VM_EXEC
++ .vm_page_prot = PAGE_READONLY,
++ .vm_flags = VM_READ
+ };
+
+ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+@@ -917,7 +927,7 @@ int in_gate_area_no_task(unsigned long a
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
+ return "[vdso]";
+ if (vma == &gate_vma)
+ return "[vsyscall]";
+diff -urNp linux-2.6.30.8/arch/x86/mm/init.c linux-2.6.30.8/arch/x86/mm/init.c
+--- linux-2.6.30.8/arch/x86/mm/init.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/init.c 2009-07-30 09:48:09.971412512 -0400
+@@ -348,7 +348,13 @@ unsigned long __init_refok init_memory_m
+ */
+ int devmem_is_allowed(unsigned long pagenr)
+ {
+- if (pagenr <= 256)
++ if (!pagenr)
++ return 1;
++#ifdef CONFIG_VM86
++ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
++ return 1;
++#endif
++ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
+ return 1;
+ if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+ return 0;
+@@ -396,6 +402,75 @@ void free_init_pages(char *what, unsigne
+
+ void free_initmem(void)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++#ifdef CONFIG_X86_32
++ /* PaX: limit KERNEL_CS to actual size */
++ unsigned long addr, limit;
++ struct desc_struct d;
++ int cpu;
++
++#ifdef CONFIG_MODULES
++ limit = ktva_ktla((unsigned long)&MODULES_END);
++#else
++ limit = (unsigned long)&_etext;
++#endif
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
++ }
++
++ /* PaX: make KERNEL_CS read-only */
++ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_data; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++#ifdef CONFIG_X86_PAE
++ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++#endif
++#else
++ unsigned long addr, end;
++
++ /* PaX: make kernel code/rodata read-only, rest non-executable */
++ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if ((unsigned long)_text <= addr && addr < (unsigned long)_data)
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ else
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++
++ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
++ end = addr + KERNEL_IMAGE_SIZE;
++ for (; addr < end; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_data)))
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ else
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++#endif
++
++ flush_tlb_all();
++#endif
++
+ free_init_pages("unused kernel memory",
+ (unsigned long)(&__init_begin),
+ (unsigned long)(&__init_end));
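
The free_initmem() addition does two jobs: the PMD walks clear _PAGE_RW over the kernel image (and set NX elsewhere), and on 32-bit the KERNEL_CS limit is shrunk so the code segment ends at the last text page. The (limit - 1UL) >> PAGE_SHIFT step is the standard 4 KiB-granularity descriptor encoding; a runnable check of that arithmetic with assumed addresses:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long etext = 0xc0400000UL;	/* hypothetical end of text */
    	unsigned long base  = 0xc0000000UL;	/* hypothetical segment base */
    	unsigned long limit = ((etext - base) - 1UL) >> 12;

    	/* segment covers pages 0..limit, i.e. (limit + 1) * 4 KiB bytes */
    	printf("limit field = %#lx -> covers %#lx bytes\n",
    	       limit, (limit + 1) << 12);
    	return 0;
    }

With flags 0xC (granularity bit set, 32-bit default), the CPU scales the stored limit by pages, which is why the byte length is converted before packing the descriptor.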
+diff -urNp linux-2.6.30.8/arch/x86/mm/iomap_32.c linux-2.6.30.8/arch/x86/mm/iomap_32.c
+--- linux-2.6.30.8/arch/x86/mm/iomap_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/iomap_32.c 2009-07-30 09:48:09.973477350 -0400
+@@ -37,12 +37,26 @@ void *kmap_atomic_prot_pfn(unsigned long
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ pagefault_disable();
+
+ debug_kmap_atomic(type);
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+diff -urNp linux-2.6.30.8/arch/x86/mm/ioremap.c linux-2.6.30.8/arch/x86/mm/ioremap.c
+--- linux-2.6.30.8/arch/x86/mm/ioremap.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/ioremap.c 2009-09-05 22:09:36.204542208 -0400
+@@ -111,8 +111,8 @@ int page_is_ram(unsigned long pagenr)
+ * Second special case: Some BIOSen report the PC BIOS
+ * area (640->1Mb) as ram even though it is not.
+ */
+- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
+- pagenr < (BIOS_END >> PAGE_SHIFT))
++ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
++ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
+ return 0;
+
+ for (i = 0; i < e820.nr_map; i++) {
+@@ -207,10 +207,7 @@ static void __iomem *__ioremap_caller(re
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+- for (pfn = phys_addr >> PAGE_SHIFT;
+- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
+- pfn++) {
+-
++ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
+ int is_ram = page_is_ram(pfn);
+
+ if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
+@@ -272,6 +269,8 @@ static void __iomem *__ioremap_caller(re
+ break;
+ }
+
++ prot = canon_pgprot(prot);
++
+ /*
+ * Ok, go for it..
+ */
+@@ -489,7 +488,7 @@ static int __init early_ioremap_debug_se
+ early_param("early_ioremap_debug", early_ioremap_debug_setup);
+
+ static __initdata int after_paging_init;
+-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
++static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
+
+ static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+ {
+@@ -521,8 +520,7 @@ void __init early_ioremap_init(void)
+ slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+
+ pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+- memset(bm_pte, 0, sizeof(bm_pte));
+- pmd_populate_kernel(&init_mm, pmd, bm_pte);
++ pmd_populate_user(&init_mm, pmd, bm_pte);
+
+ /*
+ * The boot-ioremap range spans multiple pmds, for which
+diff -urNp linux-2.6.30.8/arch/x86/mm/mmap.c linux-2.6.30.8/arch/x86/mm/mmap.c
+--- linux-2.6.30.8/arch/x86/mm/mmap.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/mmap.c 2009-07-30 09:48:09.973477350 -0400
+@@ -36,7 +36,7 @@
+ * Leave an at least ~128 MB hole.
+ */
+ #define MIN_GAP (128*1024*1024)
+-#define MAX_GAP (TASK_SIZE/6*5)
++#define MAX_GAP (pax_task_size/6*5)
+
+ /*
+ * True on X86_32 or when emulating IA32 on X86_64
+@@ -81,27 +81,40 @@ static unsigned long mmap_rnd(void)
+ return rnd << PAGE_SHIFT;
+ }
+
+-static unsigned long mmap_base(void)
++static unsigned long mmap_base(struct mm_struct *mm)
+ {
+ unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
++ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
+ }
+
+ /*
+ * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
+ * does, but not when emulating X86_32
+ */
+-static unsigned long mmap_legacy_base(void)
++static unsigned long mmap_legacy_base(struct mm_struct *mm)
+ {
+- if (mmap_is_ia32())
++ if (mmap_is_ia32()) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ return SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
+ return TASK_UNMAPPED_BASE;
+- else
++ } else
+ return TASK_UNMAPPED_BASE + mmap_rnd();
+ }
+
+@@ -112,11 +125,23 @@ static unsigned long mmap_legacy_base(vo
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ if (mmap_is_legacy()) {
+- mm->mmap_base = mmap_legacy_base();
++ mm->mmap_base = mmap_legacy_base(mm);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+- mm->mmap_base = mmap_base();
++ mm->mmap_base = mmap_base(mm);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
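
In mmap.c, mmap_base() and mmap_legacy_base() now take the mm so that the SEGMEXEC-reduced task size feeds the gap clamp, and RANDMMAP then shifts the chosen base by per-mm deltas. The underlying computation is small enough to run standalone; all values below are demo constants, not kernel ones:

    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_PAGE	4096UL
    #define ALIGN_DOWN(x)	((x) & ~(DEMO_PAGE - 1))
    #define MIN_GAP		(128UL * 1024 * 1024)

    int main(void)
    {
    	unsigned long task_size = 0xC0000000UL;        /* assumed 3 GiB space */
    	unsigned long max_gap   = task_size / 6 * 5;   /* the MAX_GAP formula */
    	unsigned long gap       = 8UL * 1024 * 1024;   /* toy RLIMIT_STACK    */
    	unsigned long rnd       = (random() & 0xFFUL) << 12; /* toy mmap_rnd() */

    	if (gap < MIN_GAP)
    		gap = MIN_GAP;
    	else if (gap > max_gap)
    		gap = max_gap;

    	/* the kernel page-aligns too; rounding direction is immaterial here */
    	printf("mmap_base = %#lx\n", ALIGN_DOWN(task_size - gap - rnd));
    	return 0;
    }

Note the top-down case subtracts both delta_mmap and delta_stack, while the legacy bottom-up case adds delta_mmap, matching the direction each allocator grows.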
+diff -urNp linux-2.6.30.8/arch/x86/mm/numa_32.c linux-2.6.30.8/arch/x86/mm/numa_32.c
+--- linux-2.6.30.8/arch/x86/mm/numa_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/numa_32.c 2009-07-30 09:48:09.974436034 -0400
+@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
+ }
+ #endif
+
+-extern unsigned long find_max_low_pfn(void);
+ extern unsigned long highend_pfn, highstart_pfn;
+
+ #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
+diff -urNp linux-2.6.30.8/arch/x86/mm/pageattr.c linux-2.6.30.8/arch/x86/mm/pageattr.c
+--- linux-2.6.30.8/arch/x86/mm/pageattr.c 2009-09-26 23:07:15.439264843 -0400
++++ linux-2.6.30.8/arch/x86/mm/pageattr.c 2009-09-26 23:07:26.531376457 -0400
+@@ -21,6 +21,7 @@
+ #include <asm/pgalloc.h>
+ #include <asm/proto.h>
+ #include <asm/pat.h>
++#include <asm/desc.h>
+
+ /*
+ * The current flushing context - we pass it instead of 5 arguments:
+@@ -265,9 +266,10 @@ static inline pgprot_t static_protection
+ * Does not cover __inittext since that is gone later on. On
+ * 64bit we do not enforce !NX on the low mapping
+ */
+- if (within(address, (unsigned long)_text, (unsigned long)_etext))
++ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
+ pgprot_val(forbidden) |= _PAGE_NX;
+
++#ifdef CONFIG_DEBUG_RODATA
+ /*
+ * The .rodata section needs to be read-only. Using the pfn
+ * catches all aliases.
+@@ -275,6 +277,7 @@ static inline pgprot_t static_protection
+ if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
+ __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
+ pgprot_val(forbidden) |= _PAGE_RW;
++#endif
+
+ prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
+
+@@ -327,8 +330,20 @@ EXPORT_SYMBOL_GPL(lookup_address);
+ */
+ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++
++ pax_open_kernel(cr0);
++#endif
++
+ /* change init_mm */
+ set_pte_atomic(kpte, pte);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ #ifdef CONFIG_X86_32
+ if (!SHARED_KERNEL_PMD) {
+ struct page *page;
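
__set_pmd_pte() in pageattr.c receives the same CR0 bracket as the kmap paths earlier. For reference, here is a hedged sketch of what the pair plausibly expands to under KERNEXEC; the real definitions live in the header portion of this patch and may differ in detail (barriers, type checks):

    /* Hedged sketch: plausible expansion, not the verified definition. */
    #define pax_open_kernel(cr0)				\
    do {							\
    	preempt_disable();				\
    	(cr0) = read_cr0();				\
    	write_cr0((cr0) & ~X86_CR0_WP);			\
    } while (0)

    #define pax_close_kernel(cr0)				\
    do {							\
    	write_cr0(cr0);					\
    	preempt_enable();				\
    } while (0)

Disabling preemption is essential in any such scheme: CR0 is per-CPU state, and being migrated with WP cleared would leave another CPU's protection intact while silently weakening this one.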
+diff -urNp linux-2.6.30.8/arch/x86/mm/pageattr-test.c linux-2.6.30.8/arch/x86/mm/pageattr-test.c
+--- linux-2.6.30.8/arch/x86/mm/pageattr-test.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/pageattr-test.c 2009-07-30 09:48:09.974436034 -0400
+@@ -36,7 +36,7 @@ enum {
+
+ static int pte_testbit(pte_t pte)
+ {
+- return pte_flags(pte) & _PAGE_UNUSED1;
++ return pte_flags(pte) & _PAGE_CPA_TEST;
+ }
+
+ struct split_state {
+diff -urNp linux-2.6.30.8/arch/x86/mm/pat.c linux-2.6.30.8/arch/x86/mm/pat.c
+--- linux-2.6.30.8/arch/x86/mm/pat.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/pat.c 2009-07-30 09:48:09.975412278 -0400
+@@ -213,7 +213,7 @@ chk_conflict(struct memtype *new, struct
+
+ conflict:
+ printk(KERN_INFO "%s:%d conflicting memory types "
+- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
++ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
+ new->end, cattr_name(new->type), cattr_name(entry->type));
+ return -EBUSY;
+ }
+@@ -487,7 +487,7 @@ int free_memtype(u64 start, u64 end)
+
+ if (err) {
+ printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
+- current->comm, current->pid, start, end);
++ current->comm, task_pid_nr(current), start, end);
+ }
+
+ dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
+@@ -588,7 +588,7 @@ int kernel_map_sync_memtype(u64 base, un
+ printk(KERN_INFO
+ "%s:%d ioremap_change_attr failed %s "
+ "for %Lx-%Lx\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(flags),
+ base, (unsigned long long)(base + size));
+ return -EINVAL;
+@@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr,
+ free_memtype(paddr, paddr + size);
+ printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+ " for %Lx-%Lx, got %s\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(want_flags),
+ (unsigned long long)paddr,
+ (unsigned long long)(paddr + size),
+@@ -826,7 +826,7 @@ static int memtype_seq_show(struct seq_f
+ return 0;
+ }
+
+-static struct seq_operations memtype_seq_ops = {
++static const struct seq_operations memtype_seq_ops = {
+ .start = memtype_seq_start,
+ .next = memtype_seq_next,
+ .stop = memtype_seq_stop,
+diff -urNp linux-2.6.30.8/arch/x86/mm/pgtable_32.c linux-2.6.30.8/arch/x86/mm/pgtable_32.c
+--- linux-2.6.30.8/arch/x86/mm/pgtable_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/mm/pgtable_32.c 2009-07-30 09:48:09.975412278 -0400
+@@ -33,6 +33,10 @@ void set_pte_vaddr(unsigned long vaddr,
+ pmd_t *pmd;
+ pte_t *pte;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
++
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ if (pgd_none(*pgd)) {
+ BUG();
+@@ -49,11 +53,20 @@ void set_pte_vaddr(unsigned long vaddr,
+ return;
+ }
+ pte = pte_offset_kernel(pmd, vaddr);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ if (pte_val(pteval))
+ set_pte_at(&init_mm, vaddr, pte, pteval);
+ else
+ pte_clear(&init_mm, vaddr, pte);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+diff -urNp linux-2.6.30.8/arch/x86/mm/tlb.c linux-2.6.30.8/arch/x86/mm/tlb.c
+--- linux-2.6.30.8/arch/x86/mm/tlb.c 2009-09-09 17:37:32.711109397 -0400
++++ linux-2.6.30.8/arch/x86/mm/tlb.c 2009-09-09 17:37:50.225398241 -0400
+@@ -12,7 +12,7 @@
+ #include <asm/uv/uv.h>
+
+ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
+- = { &init_mm, 0, };
++ = { &init_mm, 0 };
+
+ /*
+ * Smarter SMP flushing macros.
+diff -urNp linux-2.6.30.8/arch/x86/oprofile/backtrace.c linux-2.6.30.8/arch/x86/oprofile/backtrace.c
+--- linux-2.6.30.8/arch/x86/oprofile/backtrace.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/oprofile/backtrace.c 2009-07-30 09:48:09.975412278 -0400
+@@ -37,7 +37,7 @@ static void backtrace_address(void *data
+ unsigned int *depth = data;
+
+ if ((*depth)--)
+- oprofile_add_trace(addr);
++ oprofile_add_trace(ktla_ktva(addr));
+ }
+
+ static struct stacktrace_ops backtrace_ops = {
+@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
+ {
+ struct frame_head *head = (struct frame_head *)frame_pointer(regs);
+
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned long stack = kernel_stack_pointer(regs);
+ if (depth)
+ dump_trace(NULL, regs, (unsigned long *)stack, 0,
+diff -urNp linux-2.6.30.8/arch/x86/oprofile/op_model_p4.c linux-2.6.30.8/arch/x86/oprofile/op_model_p4.c
+--- linux-2.6.30.8/arch/x86/oprofile/op_model_p4.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/oprofile/op_model_p4.c 2009-07-30 09:48:09.976413155 -0400
+@@ -48,7 +48,7 @@ static inline void setup_num_counters(vo
+ #endif
+ }
+
+-static int inline addr_increment(void)
++static inline int addr_increment(void)
+ {
+ #ifdef CONFIG_SMP
+ return smp_num_siblings == 2 ? 2 : 1;
+diff -urNp linux-2.6.30.8/arch/x86/pci/common.c linux-2.6.30.8/arch/x86/pci/common.c
+--- linux-2.6.30.8/arch/x86/pci/common.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/pci/common.c 2009-07-30 09:48:09.976413155 -0400
+@@ -370,7 +370,7 @@ static const struct dmi_system_id __devi
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
+ },
+ },
+- {}
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}
+ };
+
+ void __init dmi_check_pciprobe(void)
+diff -urNp linux-2.6.30.8/arch/x86/pci/fixup.c linux-2.6.30.8/arch/x86/pci/fixup.c
+--- linux-2.6.30.8/arch/x86/pci/fixup.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/pci/fixup.c 2009-07-30 09:48:09.976413155 -0400
+@@ -364,7 +364,7 @@ static const struct dmi_system_id __devi
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"),
+ },
+ },
+- {}
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+
+ /*
+@@ -435,7 +435,7 @@ static const struct dmi_system_id __devi
+ DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+
+ static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
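
The {} terminators in these DMI and PCI ID tables are spelled out as explicit all-zero entries. Run-time behavior is identical, since the table walkers stop at the first zeroed record either way, as this runnable miniature shows; the struct is invented for the demo:

    #include <stdio.h>

    struct id { const char *name; int value; };

    static const struct id table[] = {
    	{ "foo", 1 },
    	{ "bar", 2 },
    	{ NULL, 0 },		/* explicit sentinel, same as {} */
    };

    int main(void)
    {
    	for (const struct id *p = table; p->name; p++)
    		printf("%s=%d\n", p->name, p->value);
    	return 0;
    }

Spelling the sentinel out keeps the initializers complete once these tables become const-qualified elsewhere in the patch.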
+diff -urNp linux-2.6.30.8/arch/x86/pci/i386.c linux-2.6.30.8/arch/x86/pci/i386.c
+--- linux-2.6.30.8/arch/x86/pci/i386.c 2009-07-30 20:32:40.384629006 -0400
++++ linux-2.6.30.8/arch/x86/pci/i386.c 2009-07-30 20:32:47.941604516 -0400
+@@ -269,7 +269,7 @@ void pcibios_set_master(struct pci_dev *
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
+ }
+
+-static struct vm_operations_struct pci_mmap_ops = {
++static const struct vm_operations_struct pci_mmap_ops = {
+ .access = generic_access_phys,
+ };
+
+diff -urNp linux-2.6.30.8/arch/x86/pci/irq.c linux-2.6.30.8/arch/x86/pci/irq.c
+--- linux-2.6.30.8/arch/x86/pci/irq.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/pci/irq.c 2009-07-30 09:48:09.976413155 -0400
+@@ -543,7 +543,7 @@ static __init int intel_router_probe(str
+ static struct pci_device_id __initdata pirq_440gx[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
+- { },
++ { PCI_DEVICE(0, 0) }
+ };
+
+ /* 440GX has a proprietary PIRQ router -- don't use it */
+@@ -1145,7 +1145,7 @@ static struct dmi_system_id __initdata p
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+
+ int __init pcibios_irq_init(void)
+diff -urNp linux-2.6.30.8/arch/x86/pci/pcbios.c linux-2.6.30.8/arch/x86/pci/pcbios.c
+--- linux-2.6.30.8/arch/x86/pci/pcbios.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/pci/pcbios.c 2009-07-30 09:48:09.976413155 -0400
+@@ -56,50 +56,120 @@ union bios32 {
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} bios32_indirect = { 0, __KERNEL_CS };
++} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
+
+ /*
+ * Returns the entry point for the given service, NULL on error
+ */
+
+-static unsigned long bios32_service(unsigned long service)
++static unsigned long __devinit bios32_service(unsigned long service)
+ {
+ unsigned char return_code; /* %al */
+ unsigned long address; /* %ebx */
+ unsigned long length; /* %ecx */
+ unsigned long entry; /* %edx */
+ unsigned long flags;
++ struct desc_struct d, *gdt;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
+
+ local_irq_save(flags);
+- __asm__("lcall *(%%edi); cld"
++
++ gdt = get_cpu_gdt_table(smp_processor_id());
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
++ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
+ : "=a" (return_code),
+ "=b" (address),
+ "=c" (length),
+ "=d" (entry)
+ : "0" (service),
+ "1" (0),
+- "D" (&bios32_indirect));
++ "D" (&bios32_indirect),
++ "r"(__PCIBIOS_DS)
++ : "memory");
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
++ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
++ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
++ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
++ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ local_irq_restore(flags);
+
+ switch (return_code) {
+- case 0:
+- return address + entry;
+- case 0x80: /* Not present */
+- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
+- return 0;
+- default: /* Shouldn't happen */
+- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
+- service, return_code);
++ case 0: {
++ int cpu;
++ unsigned char flags;
++
++ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
++ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
++ printk(KERN_WARNING "bios32_service: not valid\n");
+ return 0;
++ }
++ address = address + PAGE_OFFSET;
++		length += 16UL; /* some BIOSes underreport this... */
++ flags = 4;
++ if (length >= 64*1024*1024) {
++ length >>= PAGE_SHIFT;
++ flags |= 8;
++ }
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ gdt = get_cpu_gdt_table(cpu);
++ pack_descriptor(&d, address, length, 0x9b, flags);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, address, length, 0x93, flags);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++ }
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
++ return entry;
++ }
++ case 0x80: /* Not present */
++ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
++ return 0;
++ default: /* Shouldn't happen */
++ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
++ service, return_code);
++ return 0;
+ }
+ }
+
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} pci_indirect = { 0, __KERNEL_CS };
++} pci_indirect __read_only = { 0, __PCIBIOS_CS };
+
+-static int pci_bios_present;
++static int pci_bios_present __read_only;
+
+ static int __devinit check_pcibios(void)
+ {
+@@ -108,11 +178,13 @@ static int __devinit check_pcibios(void)
+ unsigned long flags, pcibios_entry;
+
+ if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
+- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
++ pci_indirect.address = pcibios_entry;
+
+ local_irq_save(flags);
+- __asm__(
+- "lcall *(%%edi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%edi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -121,7 +193,8 @@ static int __devinit check_pcibios(void)
+ "=b" (ebx),
+ "=c" (ecx)
+ : "1" (PCIBIOS_PCI_BIOS_PRESENT),
+- "D" (&pci_indirect)
++ "D" (&pci_indirect),
++ "r" (__PCIBIOS_DS)
+ : "memory");
+ local_irq_restore(flags);
+
+@@ -165,7 +238,10 @@ static int pci_bios_read(unsigned int se
+
+ switch (len) {
+ case 1:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -174,7 +250,8 @@ static int pci_bios_read(unsigned int se
+ : "1" (PCIBIOS_READ_CONFIG_BYTE),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ /*
+ * Zero-extend the result beyond 8 bits, do not trust the
+ * BIOS having done it:
+@@ -182,7 +259,10 @@ static int pci_bios_read(unsigned int se
+ *value &= 0xff;
+ break;
+ case 2:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -191,7 +271,8 @@ static int pci_bios_read(unsigned int se
+ : "1" (PCIBIOS_READ_CONFIG_WORD),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ /*
+ * Zero-extend the result beyond 16 bits, do not trust the
+ * BIOS having done it:
+@@ -199,7 +280,10 @@ static int pci_bios_read(unsigned int se
+ *value &= 0xffff;
+ break;
+ case 4:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -208,7 +292,8 @@ static int pci_bios_read(unsigned int se
+ : "1" (PCIBIOS_READ_CONFIG_DWORD),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ }
+
+@@ -231,7 +316,10 @@ static int pci_bios_write(unsigned int s
+
+ switch (len) {
+ case 1:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -240,10 +328,14 @@ static int pci_bios_write(unsigned int s
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ case 2:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -252,10 +344,14 @@ static int pci_bios_write(unsigned int s
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ case 4:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -264,7 +360,8 @@ static int pci_bios_write(unsigned int s
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ }
+
+@@ -368,10 +465,13 @@ struct irq_routing_table * pcibios_get_i
+
+ DBG("PCI: Fetching IRQ routing table... ");
+ __asm__("push %%es\n\t"
++ "movw %w8, %%ds\n\t"
+ "push %%ds\n\t"
+ "pop %%es\n\t"
+- "lcall *(%%esi); cld\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
+ "pop %%es\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -382,7 +482,8 @@ struct irq_routing_table * pcibios_get_i
+ "1" (0),
+ "D" ((long) &opt),
+ "S" (&pci_indirect),
+- "m" (opt)
++ "m" (opt),
++ "r" (__PCIBIOS_DS)
+ : "memory");
+ DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
+ if (ret & 0xff00)
+@@ -406,7 +507,10 @@ int pcibios_set_irq_routing(struct pci_d
+ {
+ int ret;
+
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w5, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -414,7 +518,8 @@ int pcibios_set_irq_routing(struct pci_d
+ : "0" (PCIBIOS_SET_PCI_HW_INT),
+ "b" ((dev->bus->number << 8) | dev->devfn),
+ "c" ((irq << 8) | (pin + 10)),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ return !(ret & 0xff00);
+ }
+ EXPORT_SYMBOL(pcibios_set_irq_routing);
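
All of the pcbios.c assembly is reworked around one idea: with KERNEXEC changing the base and limit of __KERNEL_CS, the BIOS entry point is no longer reachable through the kernel's own segments, so calls go through dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors and %ds is reloaded across each lcall. The indirection itself is a 48-bit far pointer; a hedged sketch, with the selector names taken from the hunk:

    /* Illustrative only: the shape of the indirection, not verified code. */
    struct bios_far_ptr {
    	unsigned long offset;	/* 32-bit entry point within the segment */
    	unsigned short segment;	/* e.g. __PCIBIOS_CS, installed in the GDT */
    };

    /* usage sketch (32-bit, kernel context; constraints abbreviated):
     *
     *   static struct bios_far_ptr pci_indirect = { 0, __PCIBIOS_CS };
     *   asm volatile("movw %w1, %%ds; lcall *%%ss:(%%esi); push %%ss; pop %%ds"
     *                : ... : "S" (&pci_indirect), "r" (__PCIBIOS_DS)
     *                : "memory");
     */

The push %%ss / pop %%ds epilogue restores a flat data segment after the BIOS returns, since the BIOS may have been entered with the narrower __PCIBIOS_DS in place.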
+diff -urNp linux-2.6.30.8/arch/x86/power/cpu_32.c linux-2.6.30.8/arch/x86/power/cpu_32.c
+--- linux-2.6.30.8/arch/x86/power/cpu_32.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/power/cpu_32.c 2009-07-30 09:48:09.976413155 -0400
+@@ -68,7 +68,7 @@ static void do_fpu_end(void)
+ static void fix_processor_context(void)
+ {
+ int cpu = smp_processor_id();
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+
+ set_tss_desc(cpu, t); /*
+ * This just modifies memory; should not be
+diff -urNp linux-2.6.30.8/arch/x86/power/cpu_64.c linux-2.6.30.8/arch/x86/power/cpu_64.c
+--- linux-2.6.30.8/arch/x86/power/cpu_64.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/power/cpu_64.c 2009-07-30 09:48:09.978339754 -0400
+@@ -144,7 +144,11 @@ void restore_processor_state(void)
+ static void fix_processor_context(void)
+ {
+ int cpu = smp_processor_id();
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr0;
++#endif
+
+ /*
+ * This just modifies memory; should not be necessary. But... This
+@@ -153,8 +157,16 @@ static void fix_processor_context(void)
+ */
+ set_tss_desc(cpu, t);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(cr0);
++#endif
++
+ get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(cr0);
++#endif
++
+ syscall_init(); /* This sets MSR_*STAR and related */
+ load_TR_desc(); /* This does ltr */
+ load_LDT(&current->active_mm->context); /* This does lldt */
+diff -urNp linux-2.6.30.8/arch/x86/vdso/Makefile linux-2.6.30.8/arch/x86/vdso/Makefile
+--- linux-2.6.30.8/arch/x86/vdso/Makefile 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/vdso/Makefile 2009-07-30 09:48:09.978339754 -0400
+@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
+ $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
+ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
+
+-VDSO_LDFLAGS = -fPIC -shared $(call ld-option, -Wl$(comma)--hash-style=sysv)
++VDSO_LDFLAGS = -fPIC -shared --no-undefined $(call ld-option, -Wl$(comma)--hash-style=sysv)
+
+ #
+ # Install the unstripped copy of vdso*.so listed in $(vdso-install-y).
+diff -urNp linux-2.6.30.8/arch/x86/vdso/vclock_gettime.c linux-2.6.30.8/arch/x86/vdso/vclock_gettime.c
+--- linux-2.6.30.8/arch/x86/vdso/vclock_gettime.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/vdso/vclock_gettime.c 2009-08-05 19:15:53.673598242 -0400
+@@ -22,24 +22,48 @@
+ #include <asm/hpet.h>
+ #include <asm/unistd.h>
+ #include <asm/io.h>
++#include <asm/fixmap.h>
+ #include "vextern.h"
+
+ #define gtod vdso_vsyscall_gtod_data
+
++notrace noinline long __vdso_fallback_time(long *t)
++{
++ long secs;
++ asm volatile("syscall"
++ : "=a" (secs)
++ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
++ return secs;
++}
++
+ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+ {
+ long ret;
+ asm("syscall" : "=a" (ret) :
+- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
++ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
+ return ret;
+ }
+
++notrace static inline cycle_t __vdso_vread_hpet(void)
++{
++ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
++}
++
++notrace static inline cycle_t __vdso_vread_tsc(void)
++{
++ cycle_t ret = (cycle_t)vget_cycles();
++
++ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
++}
++
+ notrace static inline long vgetns(void)
+ {
+ long v;
+- cycles_t (*vread)(void);
+- vread = gtod->clock.vread;
+- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
++ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
++ v = __vdso_vread_tsc();
++ else
++ v = __vdso_vread_hpet();
++ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
+ return (v * gtod->clock.mult) >> gtod->clock.shift;
+ }
+
+@@ -88,7 +112,9 @@ notrace static noinline int do_monotonic
+
+ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+ {
+- if (likely(gtod->sysctl_enabled && gtod->clock.vread))
++ if (likely(gtod->sysctl_enabled &&
++ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
++ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
+ switch (clock) {
+ case CLOCK_REALTIME:
+ return do_realtime(ts);
+@@ -100,10 +126,20 @@ notrace int __vdso_clock_gettime(clockid
+ int clock_gettime(clockid_t, struct timespec *)
+ __attribute__((weak, alias("__vdso_clock_gettime")));
+
+-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
++notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
+ {
+ long ret;
+- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
++ asm("syscall" : "=a" (ret) :
++ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
++ return ret;
++}
++
++notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
++{
++ if (likely(gtod->sysctl_enabled &&
++ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
++ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
++ {
+ if (likely(tv != NULL)) {
+ BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
+ offsetof(struct timespec, tv_nsec) ||
+@@ -118,9 +154,7 @@ notrace int __vdso_gettimeofday(struct t
+ }
+ return 0;
+ }
+- asm("syscall" : "=a" (ret) :
+- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+- return ret;
++ return __vdso_fallback_gettimeofday(tv, tz);
+ }
+ int gettimeofday(struct timeval *, struct timezone *)
+ __attribute__((weak, alias("__vdso_gettimeofday")));
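
The byte-by-byte clock.name comparisons introduced above are deliberate: the vDSO can no longer call through gtod->clock.vread (a pointer into protected kernel text), so it selects the TSC or HPET reader itself, and it open-codes the string compare to avoid depending on any external symbol. The same test, runnable in isolation:

    #include <stdio.h>

    static int is_tsc(const char *n)
    {
    	return n[0] == 't' && n[1] == 's' && n[2] == 'c' && !n[3];
    }

    int main(void)
    {
    	printf("%d %d\n", is_tsc("tsc"), is_tsc("hpet"));	/* 1 0 */
    	return 0;
    }

The added "r11", "cx" clobbers on the syscall asm are a separate correctness fix: the syscall instruction itself overwrites rcx and r11, so the compiler must not keep live values there.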
+diff -urNp linux-2.6.30.8/arch/x86/vdso/vdso32-setup.c linux-2.6.30.8/arch/x86/vdso/vdso32-setup.c
+--- linux-2.6.30.8/arch/x86/vdso/vdso32-setup.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/vdso/vdso32-setup.c 2009-07-30 09:48:09.979439324 -0400
+@@ -226,7 +226,7 @@ static inline void map_compat_vdso(int m
+ void enable_sep_cpu(void)
+ {
+ int cpu = get_cpu();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+
+ if (!boot_cpu_has(X86_FEATURE_SEP)) {
+ put_cpu();
+@@ -249,7 +249,7 @@ static int __init gate_vma_init(void)
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+- gate_vma.vm_page_prot = __P101;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+ /*
+ * Make sure the vDSO gets into every core dump.
+ * Dumping its contents makes post-mortem fully interpretable later
+@@ -331,7 +331,7 @@ int arch_setup_additional_pages(struct l
+ if (compat)
+ addr = VDSO_HIGH_BASE;
+ else {
+- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
++ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+@@ -358,7 +358,7 @@ int arch_setup_additional_pages(struct l
+ goto up_fail;
+ }
+
+- current->mm->context.vdso = (void *)addr;
++ current->mm->context.vdso = addr;
+ current_thread_info()->sysenter_return =
+ VDSO32_SYMBOL(addr, SYSENTER_RETURN);
+
+@@ -384,7 +384,7 @@ static ctl_table abi_table2[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+- {}
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+
+ static ctl_table abi_root_table2[] = {
+@@ -394,7 +394,7 @@ static ctl_table abi_root_table2[] = {
+ .mode = 0555,
+ .child = abi_table2
+ },
+- {}
++ { 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
+ };
+
+ static __init int ia32_binfmt_init(void)
+@@ -409,8 +409,14 @@ __initcall(ia32_binfmt_init);
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
+ return "[vdso]";
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
++ return "[vdso]";
++#endif
++
+ return NULL;
+ }
+
+@@ -419,7 +425,7 @@ struct vm_area_struct *get_gate_vma(stru
+ struct mm_struct *mm = tsk->mm;
+
+ /* Check to see if this task was created in compat vdso mode */
+- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
++ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
+ return &gate_vma;
+ return NULL;
+ }
+diff -urNp linux-2.6.30.8/arch/x86/vdso/vdso.lds.S linux-2.6.30.8/arch/x86/vdso/vdso.lds.S
+--- linux-2.6.30.8/arch/x86/vdso/vdso.lds.S 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/vdso/vdso.lds.S 2009-07-30 09:48:09.978662746 -0400
+@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
+ #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
+ #include "vextern.h"
+ #undef VEXTERN
++
++#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
++VEXTERN(fallback_gettimeofday)
++VEXTERN(fallback_time)
++VEXTERN(getcpu)
++#undef VEXTERN
+diff -urNp linux-2.6.30.8/arch/x86/vdso/vextern.h linux-2.6.30.8/arch/x86/vdso/vextern.h
+--- linux-2.6.30.8/arch/x86/vdso/vextern.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/vdso/vextern.h 2009-07-30 09:48:09.979439324 -0400
+@@ -11,6 +11,5 @@
+ put into vextern.h and be referenced as a pointer with vdso prefix.
+ The main kernel later fills in the values. */
+
+-VEXTERN(jiffies)
+ VEXTERN(vgetcpu_mode)
+ VEXTERN(vsyscall_gtod_data)
+diff -urNp linux-2.6.30.8/arch/x86/vdso/vma.c linux-2.6.30.8/arch/x86/vdso/vma.c
+--- linux-2.6.30.8/arch/x86/vdso/vma.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/vdso/vma.c 2009-07-30 09:48:09.979439324 -0400
+@@ -8,6 +8,7 @@
+ #include <linux/sched.h>
+ #include <linux/init.h>
+ #include <linux/random.h>
++#include <linux/elf.h>
+ #include <asm/vsyscall.h>
+ #include <asm/vgtod.h>
+ #include <asm/proto.h>
+@@ -56,7 +57,7 @@ static int __init init_vdso_vars(void)
+ if (!vbase)
+ goto oom;
+
+- if (memcmp(vbase, "\177ELF", 4)) {
++ if (memcmp(vbase, ELFMAG, SELFMAG)) {
+ printk("VDSO: I'm broken; not ELF\n");
+ vdso_enabled = 0;
+ }
+@@ -65,6 +66,7 @@ static int __init init_vdso_vars(void)
+ *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
+ #include "vextern.h"
+ #undef VEXTERN
++ vunmap(vbase);
+ return 0;
+
+ oom:
+@@ -123,15 +125,8 @@ int arch_setup_additional_pages(struct l
+ if (ret)
+ goto up_fail;
+
+- current->mm->context.vdso = (void *)addr;
++ current->mm->context.vdso = addr;
+ up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
+ }
+-
+-static __init int vdso_setup(char *s)
+-{
+- vdso_enabled = simple_strtoul(s, NULL, 0);
+- return 0;
+-}
+-__setup("vdso=", vdso_setup);
+diff -urNp linux-2.6.30.8/arch/x86/xen/debugfs.c linux-2.6.30.8/arch/x86/xen/debugfs.c
+--- linux-2.6.30.8/arch/x86/xen/debugfs.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/xen/debugfs.c 2009-07-30 09:48:09.979439324 -0400
+@@ -100,7 +100,7 @@ static int xen_array_release(struct inod
+ return 0;
+ }
+
+-static struct file_operations u32_array_fops = {
++static const struct file_operations u32_array_fops = {
+ .owner = THIS_MODULE,
+ .open = u32_array_open,
+ .release= xen_array_release,
+diff -urNp linux-2.6.30.8/arch/x86/xen/enlighten.c linux-2.6.30.8/arch/x86/xen/enlighten.c
+--- linux-2.6.30.8/arch/x86/xen/enlighten.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/xen/enlighten.c 2009-08-04 17:23:47.808223131 -0400
+@@ -67,8 +67,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
+
+ struct shared_info xen_dummy_shared_info;
+
+-void *xen_initial_gdt;
+-
+ /*
+ * Point at some empty memory to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+@@ -454,7 +452,7 @@ static void xen_write_idt_entry(gate_des
+
+ preempt_disable();
+
+- start = __get_cpu_var(idt_desc).address;
++ start = (unsigned long)__get_cpu_var(idt_desc).address;
+ end = start + __get_cpu_var(idt_desc).size + 1;
+
+ xen_mc_flush();
+@@ -962,12 +960,6 @@ asmlinkage void __init xen_start_kernel(
+ */
+ load_percpu_segment(0);
+ #endif
+- /*
+- * The only reliable way to retain the initial address of the
+- * percpu gdt_page is to remember it here, so we can go and
+- * mark it RW later, when the initial percpu area is freed.
+- */
+- xen_initial_gdt = &per_cpu(gdt_page, 0);
+
+ xen_smp_init();
+
+diff -urNp linux-2.6.30.8/arch/x86/xen/Kconfig linux-2.6.30.8/arch/x86/xen/Kconfig
+--- linux-2.6.30.8/arch/x86/xen/Kconfig 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/xen/Kconfig 2009-08-02 09:47:15.079210101 -0400
+@@ -8,6 +8,7 @@ config XEN
+ select PARAVIRT_CLOCK
+ depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
+ depends on X86_CMPXCHG && X86_TSC
++ depends on !PAX_KERNEXEC
+ help
+ This is the Linux Xen port. Enabling this will allow the
+ kernel to boot in a paravirtualized environment under the
+diff -urNp linux-2.6.30.8/arch/x86/xen/mmu.c linux-2.6.30.8/arch/x86/xen/mmu.c
+--- linux-2.6.30.8/arch/x86/xen/mmu.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/xen/mmu.c 2009-09-05 22:09:36.227714282 -0400
+@@ -1716,6 +1716,8 @@ __init pgd_t *xen_setup_kernel_pagetable
+ convert_pfn_mfn(init_level4_pgt);
+ convert_pfn_mfn(level3_ident_pgt);
+ convert_pfn_mfn(level3_kernel_pgt);
++ convert_pfn_mfn(level3_vmalloc_pgt);
++ convert_pfn_mfn(level3_vmemmap_pgt);
+
+ l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
+ l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
+@@ -1734,6 +1736,8 @@ __init pgd_t *xen_setup_kernel_pagetable
+ set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+ set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+diff -urNp linux-2.6.30.8/arch/x86/xen/smp.c linux-2.6.30.8/arch/x86/xen/smp.c
+--- linux-2.6.30.8/arch/x86/xen/smp.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/xen/smp.c 2009-07-30 09:48:09.981489035 -0400
+@@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
+ {
+ BUG_ON(smp_processor_id() != 0);
+ native_smp_prepare_boot_cpu();
+-
+- /* We've switched to the "real" per-cpu gdt, so make sure the
+- old memory can be recycled */
+- make_lowmem_page_readwrite(xen_initial_gdt);
+-
+ xen_setup_vcpu_info_placement();
+ }
+
+@@ -231,8 +226,8 @@ cpu_initialize_context(unsigned int cpu,
+ gdt = get_cpu_gdt_table(cpu);
+
+ ctxt->flags = VGCF_IN_KERNEL;
+- ctxt->user_regs.ds = __USER_DS;
+- ctxt->user_regs.es = __USER_DS;
++ ctxt->user_regs.ds = __KERNEL_DS;
++ ctxt->user_regs.es = __KERNEL_DS;
+ ctxt->user_regs.ss = __KERNEL_DS;
+ #ifdef CONFIG_X86_32
+ ctxt->user_regs.fs = __KERNEL_PERCPU;
+diff -urNp linux-2.6.30.8/arch/x86/xen/xen-ops.h linux-2.6.30.8/arch/x86/xen/xen-ops.h
+--- linux-2.6.30.8/arch/x86/xen/xen-ops.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/x86/xen/xen-ops.h 2009-08-04 17:23:47.809460830 -0400
+@@ -10,8 +10,6 @@
+ extern const char xen_hypervisor_callback[];
+ extern const char xen_failsafe_callback[];
+
+-extern void *xen_initial_gdt;
+-
+ struct trap_info;
+ void xen_copy_trap_info(struct trap_info *traps);
+
+diff -urNp linux-2.6.30.8/arch/xtensa/include/asm/atomic.h linux-2.6.30.8/arch/xtensa/include/asm/atomic.h
+--- linux-2.6.30.8/arch/xtensa/include/asm/atomic.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/xtensa/include/asm/atomic.h 2009-07-30 09:48:09.981489035 -0400
+@@ -165,6 +165,9 @@ static inline int atomic_sub_return(int
+ * Atomically increments @v by 1.
+ */
+ #define atomic_inc(v) atomic_add(1,(v))
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
+
+ /**
+ * atomic_inc - increment atomic variable
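The three defines added above are the fallback half of a pattern used throughout this patch: under PaX's REFCOUNT hardening, plain atomic_t operations trap on overflow, so counters that are allowed to wrap (chiefly the device statistics touched in the ATM driver hunks below) are switched to *_unchecked variants. On architectures such as xtensa that lack the instrumentation, the variants simply alias the ordinary operations. A hedged sketch of the caller side, assuming a tree with this patch applied (rx_packets and count_rx are hypothetical):

#include <asm/atomic.h>

/* A wrap-tolerant statistics counter. With PAX_REFCOUNT enabled,
 * atomic_inc() would trap on overflow; the _unchecked variant
 * opts this counter out of that check. Without the hardening the
 * two calls compile to the same thing, per the defines above. */
static atomic_unchecked_t rx_packets = ATOMIC_INIT(0);

static void count_rx(void)
{
	atomic_inc_unchecked(&rx_packets);
}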
+diff -urNp linux-2.6.30.8/arch/xtensa/include/asm/kmap_types.h linux-2.6.30.8/arch/xtensa/include/asm/kmap_types.h
+--- linux-2.6.30.8/arch/xtensa/include/asm/kmap_types.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/arch/xtensa/include/asm/kmap_types.h 2009-07-30 09:48:09.981489035 -0400
+@@ -25,6 +25,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.30.8/crypto/lrw.c linux-2.6.30.8/crypto/lrw.c
+--- linux-2.6.30.8/crypto/lrw.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/crypto/lrw.c 2009-07-30 09:48:09.982442014 -0400
+@@ -60,7 +60,7 @@ static int setkey(struct crypto_tfm *par
+ struct priv *ctx = crypto_tfm_ctx(parent);
+ struct crypto_cipher *child = ctx->child;
+ int err, i;
+- be128 tmp = { 0 };
++ be128 tmp = { 0, 0 };
+ int bsize = crypto_cipher_blocksize(child);
+
+ crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+diff -urNp linux-2.6.30.8/Documentation/dontdiff linux-2.6.30.8/Documentation/dontdiff
+--- linux-2.6.30.8/Documentation/dontdiff 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/Documentation/dontdiff 2009-08-04 17:23:49.932547446 -0400
+@@ -3,6 +3,7 @@
+ *.bin
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+@@ -49,6 +50,10 @@
+ 53c700_d.h
+ CVS
+ ChangeSet
++GPATH
++GRTAGS
++GSYMS
++GTAGS
+ Image
+ Kerntypes
+ Module.markers
+@@ -76,6 +81,7 @@ btfixupprep
+ build
+ bvmlinux
+ bzImage*
++capflags.c
+ classlist.h*
+ comp*.log
+ compile.h*
+@@ -103,9 +109,11 @@ gen_crc32table
+ gen_init_cpio
+ genksyms
+ *_gray256.c
++hash
+ ihex2fw
+ ikconfig.h*
+ initramfs_data.cpio
++initramfs_data.cpio.bz2
+ initramfs_data.cpio.gz
+ initramfs_list
+ kallsyms
+@@ -164,6 +172,7 @@ setup
+ setup.bin
+ setup.elf
+ sImage
++slabinfo
+ sm_tbl*
+ split-include
+ syscalltab.h
+@@ -187,12 +196,16 @@ version.h*
+ vmlinux
+ vmlinux-*
+ vmlinux.aout
++vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
+ vsyscall.lds
+ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
+diff -urNp linux-2.6.30.8/Documentation/kernel-parameters.txt linux-2.6.30.8/Documentation/kernel-parameters.txt
+--- linux-2.6.30.8/Documentation/kernel-parameters.txt 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/Documentation/kernel-parameters.txt 2009-09-05 22:09:36.342979816 -0400
+@@ -1707,6 +1707,12 @@ and is between 256 and 4096 characters.
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++	pax_nouderef	[X86-32] Disables UDEREF. Most likely needed in
++			virtualization environments that do not cope well with
++			the expand-down segment used by UDEREF on X86-32.
++
++	pax_softmode=	[X86-32] 0/1 to disable/enable PaX softmode at boot.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
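Both parameters documented above are consumed by early-boot handlers. The kernel idiom for that is a __setup() hook, the same mechanism as the vdso_setup() handler removed near the top of this section; a minimal sketch of how a flag such as pax_softmode could be wired up (the variable and function names are hypothetical, only the parameter string comes from the documentation above):

#include <linux/init.h>
#include <linux/kernel.h>

static int pax_softmode;	/* hypothetical flag read elsewhere at boot */

/* Parses "pax_softmode=0" / "pax_softmode=1" from the kernel
 * command line, mirroring the vdso= handler shown earlier. */
static int __init setup_pax_softmode(char *str)
{
	pax_softmode = simple_strtoul(str, NULL, 0);
	return 1;	/* parameter handled */
}
__setup("pax_softmode=", setup_pax_softmode);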
+diff -urNp linux-2.6.30.8/drivers/acpi/blacklist.c linux-2.6.30.8/drivers/acpi/blacklist.c
+--- linux-2.6.30.8/drivers/acpi/blacklist.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/acpi/blacklist.c 2009-07-30 09:48:09.982442014 -0400
+@@ -71,7 +71,7 @@ static struct acpi_blacklist_item acpi_b
+ {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
+ "Incorrect _ADR", 1},
+
+- {""}
++ {"", "", 0, 0, 0, all_versions, 0}
+ };
+
+ #if CONFIG_ACPI_BLACKLIST_YEAR
+diff -urNp linux-2.6.30.8/drivers/acpi/osl.c linux-2.6.30.8/drivers/acpi/osl.c
+--- linux-2.6.30.8/drivers/acpi/osl.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/acpi/osl.c 2009-07-30 09:48:09.986535027 -0400
+@@ -492,6 +492,8 @@ acpi_os_read_memory(acpi_physical_addres
+ void __iomem *virt_addr;
+
+ virt_addr = ioremap(phys_addr, width);
++ if (!virt_addr)
++ return AE_NO_MEMORY;
+ if (!value)
+ value = &dummy;
+
+@@ -520,6 +522,8 @@ acpi_os_write_memory(acpi_physical_addre
+ void __iomem *virt_addr;
+
+ virt_addr = ioremap(phys_addr, width);
++ if (!virt_addr)
++ return AE_NO_MEMORY;
+
+ switch (width) {
+ case 8:
+diff -urNp linux-2.6.30.8/drivers/acpi/processor_core.c linux-2.6.30.8/drivers/acpi/processor_core.c
+--- linux-2.6.30.8/drivers/acpi/processor_core.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/acpi/processor_core.c 2009-07-30 09:48:09.986535027 -0400
+@@ -703,7 +703,7 @@ static int __cpuinit acpi_processor_star
+ return 0;
+ }
+
+- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
++ BUG_ON(pr->id >= nr_cpu_ids);
+
+ /*
+ * Buggy BIOS check
+diff -urNp linux-2.6.30.8/drivers/acpi/processor_idle.c linux-2.6.30.8/drivers/acpi/processor_idle.c
+--- linux-2.6.30.8/drivers/acpi/processor_idle.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/acpi/processor_idle.c 2009-07-30 09:48:09.987663767 -0400
+@@ -108,7 +108,7 @@ static struct dmi_system_id __cpuinitdat
+ DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
+ (void *)2},
+- {},
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL},
+ };
+
+
+diff -urNp linux-2.6.30.8/drivers/acpi/video.c linux-2.6.30.8/drivers/acpi/video.c
+--- linux-2.6.30.8/drivers/acpi/video.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/acpi/video.c 2009-07-30 12:06:52.099841502 -0400
+@@ -282,7 +282,7 @@ static int acpi_video_device_brightness_
+ struct file *file);
+ static ssize_t acpi_video_device_write_brightness(struct file *file,
+ const char __user *buffer, size_t count, loff_t *data);
+-static struct file_operations acpi_video_device_brightness_fops = {
++static const struct file_operations acpi_video_device_brightness_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_video_device_brightness_open_fs,
+ .read = seq_read,
+diff -urNp linux-2.6.30.8/drivers/ata/ahci.c linux-2.6.30.8/drivers/ata/ahci.c
+--- linux-2.6.30.8/drivers/ata/ahci.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/ata/ahci.c 2009-07-30 09:48:09.987663767 -0400
+@@ -622,7 +622,7 @@ static const struct pci_device_id ahci_p
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
+
+- { } /* terminate list */
++ { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
+ };
+
+
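The ahci hunk above shows another pattern repeated across the driver hunks that follow: the empty { } terminator of a device-ID table is spelled out as a fully zero-initialized sentinel. Behaviour is unchanged; writing every field explicitly appears to serve the patch's wider constification of such tables and keeps missing-initializer warnings quiet. A sketch, with a hypothetical table name and IDs borrowed from the ata_piix hunk below:

#include <linux/pci.h>

static const struct pci_device_id example_pci_tbl[] = {
	/* vendor, device, subvendor, subdevice, class, mask, data */
	{ 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list, all fields explicit */
};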
+diff -urNp linux-2.6.30.8/drivers/ata/ata_piix.c linux-2.6.30.8/drivers/ata/ata_piix.c
+--- linux-2.6.30.8/drivers/ata/ata_piix.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/ata/ata_piix.c 2009-07-30 09:48:09.988577262 -0400
+@@ -293,7 +293,7 @@ static const struct pci_device_id piix_p
+ { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+- { } /* terminate list */
++ { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
+ };
+
+ static struct pci_driver piix_pci_driver = {
+@@ -607,7 +607,7 @@ static const struct ich_laptop ich_lapto
+ { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
+ { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
+ /* end marker */
+- { 0, }
++ { 0, 0, 0 }
+ };
+
+ /**
+@@ -1073,7 +1073,7 @@ static int piix_broken_suspend(void)
+ },
+ },
+
+- { } /* terminate list */
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } /* terminate list */
+ };
+ static const char *oemstrs[] = {
+ "Tecra M3,",
+diff -urNp linux-2.6.30.8/drivers/ata/libata-core.c linux-2.6.30.8/drivers/ata/libata-core.c
+--- linux-2.6.30.8/drivers/ata/libata-core.c 2009-09-26 23:07:15.504660661 -0400
++++ linux-2.6.30.8/drivers/ata/libata-core.c 2009-09-26 23:07:26.578141628 -0400
+@@ -896,7 +896,7 @@ static const struct ata_xfer_ent {
+ { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
+ { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
+ { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
+- { -1, },
++ { -1, 0, 0 }
+ };
+
+ /**
+@@ -3135,7 +3135,7 @@ static const struct ata_timing ata_timin
+ { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
+ { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
+
+- { 0xFF }
++ { 0xFF, 0, 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
+@@ -4302,7 +4302,7 @@ static const struct ata_blacklist_entry
+ { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
+
+ /* End Marker */
+- { }
++ { NULL, NULL, 0 }
+ };
+
+ static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
+diff -urNp linux-2.6.30.8/drivers/atm/adummy.c linux-2.6.30.8/drivers/atm/adummy.c
+--- linux-2.6.30.8/drivers/atm/adummy.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/adummy.c 2009-07-30 09:48:09.989999430 -0400
+@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+diff -urNp linux-2.6.30.8/drivers/atm/ambassador.c linux-2.6.30.8/drivers/atm/ambassador.c
+--- linux-2.6.30.8/drivers/atm/ambassador.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/ambassador.c 2009-07-30 09:48:09.990535817 -0400
+@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
+ PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
+
+ // VC layer stats
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ // free the descriptor
+ kfree (tx_descr);
+@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
+ dump_skb ("<<<", vc, skb);
+
+ // VC layer stats
+- atomic_inc(&atm_vcc->stats->rx);
++ atomic_inc_unchecked(&atm_vcc->stats->rx);
+ __net_timestamp(skb);
+ // end of our responsibility
+ atm_vcc->push (atm_vcc, skb);
+@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
+ } else {
+ PRINTK (KERN_INFO, "dropped over-size frame");
+ // should we count this?
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ }
+
+ } else {
+@@ -1349,7 +1349,7 @@ static int amb_send (struct atm_vcc * at
+ }
+
+ if (check_area (skb->data, skb->len)) {
+- atomic_inc(&atm_vcc->stats->tx_err);
++ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
+ return -ENOMEM; // ?
+ }
+
+diff -urNp linux-2.6.30.8/drivers/atm/atmtcp.c linux-2.6.30.8/drivers/atm/atmtcp.c
+--- linux-2.6.30.8/drivers/atm/atmtcp.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/atmtcp.c 2009-07-30 09:48:09.991629377 -0400
+@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+ if (dev_data) return 0;
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOLINK;
+ }
+ size = skb->len+sizeof(struct atmtcp_hdr);
+@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
+ if (!new_skb) {
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOBUFS;
+ }
+ hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
+@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+ out_vcc->push(out_vcc,new_skb);
+- atomic_inc(&vcc->stats->tx);
+- atomic_inc(&out_vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->tx);
++ atomic_inc_unchecked(&out_vcc->stats->rx);
+ return 0;
+ }
+
+@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
+ out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
+ read_unlock(&vcc_sklist_lock);
+ if (!out_vcc) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ goto done;
+ }
+ skb_pull(skb,sizeof(struct atmtcp_hdr));
+@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
+ __net_timestamp(new_skb);
+ skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
+ out_vcc->push(out_vcc,new_skb);
+- atomic_inc(&vcc->stats->tx);
+- atomic_inc(&out_vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->tx);
++ atomic_inc_unchecked(&out_vcc->stats->rx);
+ done:
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+diff -urNp linux-2.6.30.8/drivers/atm/eni.c linux-2.6.30.8/drivers/atm/eni.c
+--- linux-2.6.30.8/drivers/atm/eni.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/eni.c 2009-07-30 09:48:09.991629377 -0400
+@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
+ DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
+ vcc->dev->number);
+ length = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ else {
+ length = ATM_CELL_SIZE-1; /* no HEC */
+@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+ size);
+ }
+ eff = length = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ else {
+ size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
+@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+ "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
+ vcc->dev->number,vcc->vci,length,size << 2,descr);
+ length = eff = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ }
+ skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
+@@ -770,7 +770,7 @@ rx_dequeued++;
+ vcc->push(vcc,skb);
+ pushed++;
+ }
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ wake_up(&eni_dev->rx_wait);
+ }
+@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
+ PCI_DMA_TODEVICE);
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb_irq(skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ wake_up(&eni_dev->tx_wait);
+ dma_complete++;
+ }
+diff -urNp linux-2.6.30.8/drivers/atm/firestream.c linux-2.6.30.8/drivers/atm/firestream.c
+--- linux-2.6.30.8/drivers/atm/firestream.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/firestream.c 2009-07-30 09:48:09.992530374 -0400
+@@ -748,7 +748,7 @@ static void process_txdone_queue (struct
+ }
+ }
+
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ fs_dprintk (FS_DEBUG_TXMEM, "i");
+ fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
+@@ -815,7 +815,7 @@ static void process_incoming (struct fs_
+ #endif
+ skb_put (skb, qe->p1 & 0xffff);
+ ATM_SKB(skb)->vcc = atm_vcc;
+- atomic_inc(&atm_vcc->stats->rx);
++ atomic_inc_unchecked(&atm_vcc->stats->rx);
+ __net_timestamp(skb);
+ fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
+ atm_vcc->push (atm_vcc, skb);
+@@ -836,12 +836,12 @@ static void process_incoming (struct fs_
+ kfree (pe);
+ }
+ if (atm_vcc)
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ break;
+ case 0x1f: /* Reassembly abort: no buffers. */
+ /* Silently increment error counter. */
+ if (atm_vcc)
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ break;
+ default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
+ printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
+diff -urNp linux-2.6.30.8/drivers/atm/fore200e.c linux-2.6.30.8/drivers/atm/fore200e.c
+--- linux-2.6.30.8/drivers/atm/fore200e.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/fore200e.c 2009-07-30 09:48:09.993922247 -0400
+@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
+ #endif
+ /* check error condition */
+ if (*entry->status & STATUS_ERROR)
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ else
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ }
+ }
+
+@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
+ if (skb == NULL) {
+ DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
+
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return -ENOMEM;
+ }
+
+@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
+
+ dev_kfree_skb_any(skb);
+
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return -ENOMEM;
+ }
+
+ ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+
+@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
+ DPRINTK(2, "damaged PDU on %d.%d.%d\n",
+ fore200e->atm_dev->number,
+ entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ }
+
+@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
+ goto retry_here;
+ }
+
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+
+ fore200e->tx_sat++;
+ DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
+diff -urNp linux-2.6.30.8/drivers/atm/he.c linux-2.6.30.8/drivers/atm/he.c
+--- linux-2.6.30.8/drivers/atm/he.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/he.c 2009-07-30 09:48:09.994421569 -0400
+@@ -1728,7 +1728,7 @@ he_service_rbrq(struct he_dev *he_dev, i
+
+ if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
+ hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto return_host_buffers;
+ }
+
+@@ -1761,7 +1761,7 @@ he_service_rbrq(struct he_dev *he_dev, i
+ RBRQ_LEN_ERR(he_dev->rbrq_head)
+ ? "LEN_ERR" : "",
+ vcc->vpi, vcc->vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto return_host_buffers;
+ }
+
+@@ -1820,7 +1820,7 @@ he_service_rbrq(struct he_dev *he_dev, i
+ vcc->push(vcc, skb);
+ spin_lock(&he_dev->global_lock);
+
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ return_host_buffers:
+ ++pdus_assembled;
+@@ -2165,7 +2165,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
+ tpd->vcc->pop(tpd->vcc, tpd->skb);
+ else
+ dev_kfree_skb_any(tpd->skb);
+- atomic_inc(&tpd->vcc->stats->tx_err);
++ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
+ }
+ pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+ return;
+@@ -2577,7 +2577,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -EINVAL;
+ }
+
+@@ -2588,7 +2588,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -EINVAL;
+ }
+ #endif
+@@ -2600,7 +2600,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+ return -ENOMEM;
+ }
+@@ -2642,7 +2642,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+ return -ENOMEM;
+ }
+@@ -2673,7 +2673,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ __enqueue_tpd(he_dev, tpd, cid);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+diff -urNp linux-2.6.30.8/drivers/atm/horizon.c linux-2.6.30.8/drivers/atm/horizon.c
+--- linux-2.6.30.8/drivers/atm/horizon.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/horizon.c 2009-07-30 09:48:09.994421569 -0400
+@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
+ {
+ struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
+ // VC layer stats
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ __net_timestamp(skb);
+ // end of our responsibility
+ vcc->push (vcc, skb);
+@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
+ dev->tx_iovec = NULL;
+
+ // VC layer stats
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ // free the skb
+ hrz_kfree_skb (skb);
+diff -urNp linux-2.6.30.8/drivers/atm/idt77252.c linux-2.6.30.8/drivers/atm/idt77252.c
+--- linux-2.6.30.8/drivers/atm/idt77252.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/idt77252.c 2009-07-30 09:48:09.995868107 -0400
+@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
+ else
+ dev_kfree_skb(skb);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ }
+
+ atomic_dec(&scq->used);
+@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
+ if ((sb = dev_alloc_skb(64)) == NULL) {
+ printk("%s: Can't allocate buffers for aal0.\n",
+ card->name);
+- atomic_add(i, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i, &vcc->stats->rx_drop);
+ break;
+ }
+ if (!atm_charge(vcc, sb->truesize)) {
+ RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
+ card->name);
+- atomic_add(i - 1, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
+ dev_kfree_skb(sb);
+ break;
+ }
+@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ cell += ATM_CELL_PAYLOAD;
+ }
+@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
+ "(CDC: %08x)\n",
+ card->name, len, rpp->len, readl(SAR_REG_CDC));
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (stat & SAR_RSQE_CRC) {
+ RXPRINTK("%s: AAL5 CRC error.\n", card->name);
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (skb_queue_len(&rpp->queue) > 1) {
+@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ RXPRINTK("%s: Can't alloc RX skb.\n",
+ card->name);
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (!atm_charge(vcc, skb->truesize)) {
+@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ __net_timestamp(skb);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ return;
+ }
+@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ __net_timestamp(skb);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ if (skb->truesize > SAR_FB_SIZE_3)
+ add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
+@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
+ if (vcc->qos.aal != ATM_AAL0) {
+ RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
+ card->name, vpi, vci);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto drop;
+ }
+
+ if ((sb = dev_alloc_skb(64)) == NULL) {
+ printk("%s: Can't allocate buffers for AAL0.\n",
+ card->name);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto drop;
+ }
+
+@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ drop:
+ skb_pull(queue, 64);
+@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
+
+ if (vc == NULL) {
+ printk("%s: NULL connection in send().\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+ if (!test_bit(VCF_TX, &vc->flags)) {
+ printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
+ break;
+ default:
+ printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ printk("%s: No scatter-gather yet.\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
+
+ err = queue_skb(card, vc, skb, oam);
+ if (err) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return err;
+ }
+@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
+ skb = dev_alloc_skb(64);
+ if (!skb) {
+ printk("%s: Out of memory in send_oam().\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOMEM;
+ }
+ atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+diff -urNp linux-2.6.30.8/drivers/atm/iphase.c linux-2.6.30.8/drivers/atm/iphase.c
+--- linux-2.6.30.8/drivers/atm/iphase.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/iphase.c 2009-07-30 09:48:09.996522301 -0400
+@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
+ status = (u_short) (buf_desc_ptr->desc_mode);
+ if (status & (RX_CER | RX_PTE | RX_OFL))
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ IF_ERR(printk("IA: bad packet, dropping it");)
+ if (status & RX_CER) {
+ IF_ERR(printk(" cause: packet CRC error\n");)
+@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
+ len = dma_addr - buf_addr;
+ if (len > iadev->rx_buf_sz) {
+ printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out_free_desc;
+ }
+
+@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
+ ia_vcc = INPH_IA_VCC(vcc);
+ if (ia_vcc == NULL)
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ dev_kfree_skb_any(skb);
+ atm_return(vcc, atm_guess_pdu2truesize(len));
+ goto INCR_DLE;
+@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
+ if ((length > iadev->rx_buf_sz) || (length >
+ (skb->len - sizeof(struct cpcs_trailer))))
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
+ length, skb->len);)
+ dev_kfree_skb_any(skb);
+@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
+
+ IF_RX(printk("rx_dle_intr: skb push");)
+ vcc->push(vcc,skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ iadev->rx_pkt_cnt++;
+ }
+ INCR_DLE:
+@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
+ if ((desc == 0) || (desc > iadev->num_tx_desc))
+ {
+ IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ if (vcc->pop)
+ vcc->pop(vcc, skb);
+ else
+@@ -3024,7 +3024,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
+ ATM_DESC(skb) = vcc->vci;
+ skb_queue_tail(&iadev->tx_dma_q, skb);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ iadev->tx_pkt_cnt++;
+ /* Increment transaction counter */
+ writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
+diff -urNp linux-2.6.30.8/drivers/atm/lanai.c linux-2.6.30.8/drivers/atm/lanai.c
+--- linux-2.6.30.8/drivers/atm/lanai.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/lanai.c 2009-07-30 09:48:09.997872955 -0400
+@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
+ vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
+ lanai_endtx(lanai, lvcc);
+ lanai_free_skb(lvcc->tx.atmvcc, skb);
+- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
++ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
+ }
+
+ /* Try to fill the buffer - don't call unless there is backlog */
+@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
+ ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
+ __net_timestamp(skb);
+ lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
+ out:
+ lvcc->rx.buf.ptr = end;
+ cardvcc_write(lvcc, endptr, vcc_rxreadptr);
+@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
+ DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
+ "vcc %d\n", lanai->number, (unsigned int) s, vci);
+ lanai->stats.service_rxnotaal5++;
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ return 0;
+ }
+ if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
+@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
+ int bytes;
+ read_unlock(&vcc_sklist_lock);
+ DPRINTK("got trashed rx pdu on vci %d\n", vci);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_trash++;
+ bytes = (SERVICE_GET_END(s) * 16) -
+ (((unsigned long) lvcc->rx.buf.ptr) -
+@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
+ }
+ if (s & SERVICE_STREAM) {
+ read_unlock(&vcc_sklist_lock);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_stream++;
+ printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
+ "PDU on VCI %d!\n", lanai->number, vci);
+@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
+ return 0;
+ }
+ DPRINTK("got rx crc error on vci %d\n", vci);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_rxcrc++;
+ lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
+ cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
+diff -urNp linux-2.6.30.8/drivers/atm/nicstar.c linux-2.6.30.8/drivers/atm/nicstar.c
+--- linux-2.6.30.8/drivers/atm/nicstar.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/nicstar.c 2009-07-30 09:48:09.998576713 -0400
+@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
+ if ((vc = (vc_map *) vcc->dev_data) == NULL)
+ {
+ printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
+ if (!vc->tx)
+ {
+ printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
+ if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
+ {
+ printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
+ if (skb_shinfo(skb)->nr_frags != 0)
+ {
+ printk("nicstar%d: No scatter-gather yet.\n", card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
+
+ if (push_scqe(card, vc, scq, &scqe, skb) != 0)
+ {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
+ {
+ printk("nicstar%d: Can't allocate buffers for aal0.\n",
+ card->index);
+- atomic_add(i,&vcc->stats->rx_drop);
++ atomic_add_unchecked(i,&vcc->stats->rx_drop);
+ break;
+ }
+ if (!atm_charge(vcc, sb->truesize))
+ {
+ RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
+ card->index);
+- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
++ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
+ dev_kfree_skb_any(sb);
+ break;
+ }
+@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ cell += ATM_CELL_PAYLOAD;
+ }
+
+@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ if (iovb == NULL)
+ {
+ printk("nicstar%d: Out of iovec buffers.\n", card->index);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ recycle_rx_buf(card, skb);
+ return;
+ }
+@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
+ {
+ printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
+ NS_SKB(iovb)->iovcnt = 0;
+ iovb->len = 0;
+@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ printk("nicstar%d: Expected a small buffer, and this is not one.\n",
+ card->index);
+ which_list(card, skb);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_rx_buf(card, skb);
+ vc->rx_iov = NULL;
+ recycle_iov_buf(card, iovb);
+@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ printk("nicstar%d: Expected a large buffer, and this is not one.\n",
+ card->index);
+ which_list(card, skb);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
+ NS_SKB(iovb)->iovcnt);
+ vc->rx_iov = NULL;
+@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ printk(" - PDU size mismatch.\n");
+ else
+ printk(".\n");
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
+ NS_SKB(iovb)->iovcnt);
+ vc->rx_iov = NULL;
+@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ if (!atm_charge(vcc, skb->truesize))
+ {
+ push_rxbufs(card, skb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ }
+ else
+ {
+@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ }
+ else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
+@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ if (!atm_charge(vcc, sb->truesize))
+ {
+ push_rxbufs(card, sb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ }
+ else
+ {
+@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+
+ push_rxbufs(card, skb);
+@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ if (!atm_charge(vcc, skb->truesize))
+ {
+ push_rxbufs(card, skb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ }
+ else
+ {
+@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+
+ push_rxbufs(card, sb);
+@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ if (hb == NULL)
+ {
+ printk("nicstar%d: Out of huge buffers.\n", card->index);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
+ NS_SKB(iovb)->iovcnt);
+ vc->rx_iov = NULL;
+@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ }
+ else
+ dev_kfree_skb_any(hb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ }
+ else
+ {
+@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
+ #endif /* NS_USE_DESTRUCTORS */
+ __net_timestamp(hb);
+ vcc->push(vcc, hb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ }
+
+diff -urNp linux-2.6.30.8/drivers/atm/solos-pci.c linux-2.6.30.8/drivers/atm/solos-pci.c
+--- linux-2.6.30.8/drivers/atm/solos-pci.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/solos-pci.c 2009-07-30 09:48:09.998576713 -0400
+@@ -663,7 +663,7 @@ void solos_bh(unsigned long card_arg)
+ }
+ atm_charge(vcc, skb->truesize);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ break;
+
+ case PKT_STATUS:
+@@ -966,7 +966,7 @@ static uint32_t fpga_tx(struct solos_car
+ vcc = SKB_CB(oldskb)->vcc;
+
+ if (vcc) {
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ solos_pop(vcc, oldskb);
+ } else
+ dev_kfree_skb_irq(oldskb);
+diff -urNp linux-2.6.30.8/drivers/atm/suni.c linux-2.6.30.8/drivers/atm/suni.c
+--- linux-2.6.30.8/drivers/atm/suni.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/suni.c 2009-07-30 09:48:09.998576713 -0400
+@@ -49,7 +49,7 @@ static DEFINE_SPINLOCK(sunis_lock);
+
+
+ #define ADD_LIMITED(s,v) \
+- atomic_add((v),&stats->s); \
++ atomic_add_unchecked((v),&stats->s); \
+ if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
+
+
+diff -urNp linux-2.6.30.8/drivers/atm/uPD98402.c linux-2.6.30.8/drivers/atm/uPD98402.c
+--- linux-2.6.30.8/drivers/atm/uPD98402.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/uPD98402.c 2009-07-30 09:48:09.999830275 -0400
+@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
+ struct sonet_stats tmp;
+ int error = 0;
+
+- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
++ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
+ sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
+ if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
+ if (zero && !error) {
+@@ -160,7 +160,7 @@ static int uPD98402_ioctl(struct atm_dev
+
+
+ #define ADD_LIMITED(s,v) \
+- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
++ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
+ if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
+ atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
+
+@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
+ if (reason & uPD98402_INT_PFM) stat_event(dev);
+ if (reason & uPD98402_INT_PCO) {
+ (void) GET(PCOCR); /* clear interrupt cause */
+- atomic_add(GET(HECCT),
++ atomic_add_unchecked(GET(HECCT),
+ &PRIV(dev)->sonet_stats.uncorr_hcs);
+ }
+ if ((reason & uPD98402_INT_RFO) &&
+diff -urNp linux-2.6.30.8/drivers/atm/zatm.c linux-2.6.30.8/drivers/atm/zatm.c
+--- linux-2.6.30.8/drivers/atm/zatm.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/atm/zatm.c 2009-07-30 09:48:09.999830275 -0400
+@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
+ }
+ if (!size) {
+ dev_kfree_skb_irq(skb);
+- if (vcc) atomic_inc(&vcc->stats->rx_err);
++ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
+ continue;
+ }
+ if (!atm_charge(vcc,skb->truesize)) {
+@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
+ skb->len = size;
+ ATM_SKB(skb)->vcc = vcc;
+ vcc->push(vcc,skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ zout(pos & 0xffff,MTA(mbx));
+ #if 0 /* probably a stupid idea */
+@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
+ skb_queue_head(&zatm_vcc->backlog,skb);
+ break;
+ }
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ wake_up(&zatm_vcc->tx_wait);
+ }
+
+diff -urNp linux-2.6.30.8/drivers/block/cciss.c linux-2.6.30.8/drivers/block/cciss.c
+--- linux-2.6.30.8/drivers/block/cciss.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/block/cciss.c 2009-07-30 09:48:10.000592968 -0400
+@@ -351,7 +351,7 @@ static void cciss_seq_stop(struct seq_fi
+ h->busy_configuring = 0;
+ }
+
+-static struct seq_operations cciss_seq_ops = {
++static const struct seq_operations cciss_seq_ops = {
+ .start = cciss_seq_start,
+ .show = cciss_seq_show,
+ .next = cciss_seq_next,
+@@ -414,7 +414,7 @@ out:
+ return err;
+ }
+
+-static struct file_operations cciss_proc_fops = {
++static const struct file_operations cciss_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = cciss_seq_open,
+ .read = seq_read,
+diff -urNp linux-2.6.30.8/drivers/char/agp/alpha-agp.c linux-2.6.30.8/drivers/char/agp/alpha-agp.c
+--- linux-2.6.30.8/drivers/char/agp/alpha-agp.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/agp/alpha-agp.c 2009-07-30 09:48:10.000592968 -0400
+@@ -40,7 +40,7 @@ static struct aper_size_info_fixed alpha
+ { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
+ };
+
+-struct vm_operations_struct alpha_core_agp_vm_ops = {
++const struct vm_operations_struct alpha_core_agp_vm_ops = {
+ .fault = alpha_core_agp_vm_fault,
+ };
+
+diff -urNp linux-2.6.30.8/drivers/char/agp/frontend.c linux-2.6.30.8/drivers/char/agp/frontend.c
+--- linux-2.6.30.8/drivers/char/agp/frontend.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/agp/frontend.c 2009-07-30 09:48:10.001783459 -0400
+@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
+ if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
+ return -EFAULT;
+
+- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
++ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
+ return -EFAULT;
+
+ client = agp_find_client_by_pid(reserve.pid);
+diff -urNp linux-2.6.30.8/drivers/char/agp/intel-agp.c linux-2.6.30.8/drivers/char/agp/intel-agp.c
+--- linux-2.6.30.8/drivers/char/agp/intel-agp.c 2009-09-26 23:07:15.572375574 -0400
++++ linux-2.6.30.8/drivers/char/agp/intel-agp.c 2009-09-26 23:07:26.697782298 -0400
+@@ -2378,7 +2378,7 @@ static struct pci_device_id agp_intel_pc
+ ID(PCI_DEVICE_ID_INTEL_Q45_HB),
+ ID(PCI_DEVICE_ID_INTEL_G45_HB),
+ ID(PCI_DEVICE_ID_INTEL_G41_HB),
+- { }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
+diff -urNp linux-2.6.30.8/drivers/char/apm-emulation.c linux-2.6.30.8/drivers/char/apm-emulation.c
+--- linux-2.6.30.8/drivers/char/apm-emulation.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/apm-emulation.c 2009-07-30 09:48:10.002661044 -0400
+@@ -393,7 +393,7 @@ static int apm_open(struct inode * inode
+ return as ? 0 : -ENOMEM;
+ }
+
+-static struct file_operations apm_bios_fops = {
++static const struct file_operations apm_bios_fops = {
+ .owner = THIS_MODULE,
+ .read = apm_read,
+ .poll = apm_poll,
+diff -urNp linux-2.6.30.8/drivers/char/bfin-otp.c linux-2.6.30.8/drivers/char/bfin-otp.c
+--- linux-2.6.30.8/drivers/char/bfin-otp.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/bfin-otp.c 2009-07-30 09:48:10.003480690 -0400
+@@ -133,7 +133,7 @@ static ssize_t bfin_otp_write(struct fil
+ # define bfin_otp_write NULL
+ #endif
+
+-static struct file_operations bfin_otp_fops = {
++static const struct file_operations bfin_otp_fops = {
+ .owner = THIS_MODULE,
+ .read = bfin_otp_read,
+ .write = bfin_otp_write,
+diff -urNp linux-2.6.30.8/drivers/char/hpet.c linux-2.6.30.8/drivers/char/hpet.c
+--- linux-2.6.30.8/drivers/char/hpet.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/hpet.c 2009-07-30 09:48:10.003480690 -0400
+@@ -995,7 +995,7 @@ static struct acpi_driver hpet_acpi_driv
+ },
+ };
+
+-static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
++static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops, {NULL, NULL}, NULL, NULL };
+
+ static int __init hpet_init(void)
+ {
+diff -urNp linux-2.6.30.8/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.30.8/drivers/char/ipmi/ipmi_msghandler.c
+--- linux-2.6.30.8/drivers/char/ipmi/ipmi_msghandler.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/ipmi/ipmi_msghandler.c 2009-07-30 09:48:10.004509700 -0400
+@@ -413,7 +413,7 @@ struct ipmi_smi {
+ struct proc_dir_entry *proc_dir;
+ char proc_dir_name[10];
+
+- atomic_t stats[IPMI_NUM_STATS];
++ atomic_unchecked_t stats[IPMI_NUM_STATS];
+
+ /*
+ * run_to_completion duplicate of smb_info, smi_info
+@@ -446,7 +446,7 @@ static DEFINE_MUTEX(smi_watchers_mutex);
+
+
+ #define ipmi_inc_stat(intf, stat) \
+- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
++ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
+ #define ipmi_get_stat(intf, stat) \
+ ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
+
+diff -urNp linux-2.6.30.8/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.30.8/drivers/char/ipmi/ipmi_si_intf.c
+--- linux-2.6.30.8/drivers/char/ipmi/ipmi_si_intf.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/ipmi/ipmi_si_intf.c 2009-07-30 09:48:10.005414644 -0400
+@@ -277,7 +277,7 @@ struct smi_info {
+ unsigned char slave_addr;
+
+ /* Counters and things for the proc filesystem. */
+- atomic_t stats[SI_NUM_STATS];
++ atomic_unchecked_t stats[SI_NUM_STATS];
+
+ struct task_struct *thread;
+
+@@ -285,7 +285,7 @@ struct smi_info {
+ };
+
+ #define smi_inc_stat(smi, stat) \
+- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
++ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
+ #define smi_get_stat(smi, stat) \
+ ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
+
+diff -urNp linux-2.6.30.8/drivers/char/keyboard.c linux-2.6.30.8/drivers/char/keyboard.c
+--- linux-2.6.30.8/drivers/char/keyboard.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/keyboard.c 2009-07-30 11:10:48.982870250 -0400
+@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
+ kbd->kbdmode == VC_MEDIUMRAW) &&
+ value != KVAL(K_SAK))
+ return; /* SAK is allowed even in raw mode */
++
++#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
++ {
++ void *func = fn_handler[value];
++ if (func == fn_show_state || func == fn_show_ptregs ||
++ func == fn_show_mem)
++ return;
++ }
++#endif
++
+ fn_handler[value](vc);
+ }
+
+@@ -1388,7 +1398,7 @@ static const struct input_device_id kbd_
+ .evbit = { BIT_MASK(EV_SND) },
+ },
+
+- { }, /* Terminating entry */
++ { 0 }, /* Terminating entry */
+ };
+
+ MODULE_DEVICE_TABLE(input, kbd_ids);
+diff -urNp linux-2.6.30.8/drivers/char/mem.c linux-2.6.30.8/drivers/char/mem.c
+--- linux-2.6.30.8/drivers/char/mem.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/mem.c 2009-07-30 12:07:09.578070399 -0400
+@@ -18,6 +18,7 @@
+ #include <linux/raw.h>
+ #include <linux/tty.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/ptrace.h>
+ #include <linux/device.h>
+ #include <linux/highmem.h>
+@@ -35,6 +36,10 @@
+ # include <linux/efi.h>
+ #endif
+
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++extern struct file_operations grsec_fops;
++#endif
++
+ /*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+@@ -192,6 +197,11 @@ static ssize_t write_mem(struct file * f
+ if (!valid_phys_addr_range(p, count))
+ return -EFAULT;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_mem_write();
++ return -EPERM;
++#endif
++
+ written = 0;
+
+ #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
+@@ -301,7 +311,7 @@ static inline int private_mapping_ok(str
+ }
+ #endif
+
+-static struct vm_operations_struct mmap_mem_ops = {
++static const struct vm_operations_struct mmap_mem_ops = {
+ #ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys
+ #endif
+@@ -324,6 +334,11 @@ static int mmap_mem(struct file * file,
+ &vma->vm_page_prot))
+ return -EINVAL;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ if (gr_handle_mem_mmap(vma->vm_pgoff << PAGE_SHIFT, vma))
++ return -EPERM;
++#endif
++
+ vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
+ size,
+ vma->vm_page_prot);
+@@ -558,6 +573,11 @@ static ssize_t write_kmem(struct file *
+ ssize_t written;
+ char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_kmem_write();
++ return -EPERM;
++#endif
++
+ if (p < (unsigned long) high_memory) {
+
+ wrote = count;
+@@ -764,6 +784,16 @@ static loff_t memory_lseek(struct file *
+
+ static int open_port(struct inode * inode, struct file * filp)
+ {
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_open_port();
++ return -EPERM;
++#endif
++
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++static int open_mem(struct inode * inode, struct file * filp)
++{
+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+ }
+
+@@ -771,7 +801,6 @@ static int open_port(struct inode * inod
+ #define full_lseek null_lseek
+ #define write_zero write_null
+ #define read_full read_zero
+-#define open_mem open_port
+ #define open_kmem open_mem
+ #define open_oldmem open_mem
+
+@@ -911,6 +940,11 @@ static int memory_open(struct inode * in
+ filp->f_op = &oldmem_fops;
+ break;
+ #endif
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++ case 13:
++ filp->f_op = &grsec_fops;
++ break;
++#endif
+ default:
+ unlock_kernel();
+ return -ENXIO;
+@@ -947,6 +981,9 @@ static const struct {
+ #ifdef CONFIG_CRASH_DUMP
+ {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
+ #endif
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++ {13,"grsec", S_IRUSR | S_IWUGO, &grsec_fops},
++#endif
+ };
+
+ static struct class *mem_class;
+diff -urNp linux-2.6.30.8/drivers/char/misc.c linux-2.6.30.8/drivers/char/misc.c
+--- linux-2.6.30.8/drivers/char/misc.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/misc.c 2009-07-30 09:48:10.006662764 -0400
+@@ -91,7 +91,7 @@ static int misc_seq_show(struct seq_file
+ }
+
+
+-static struct seq_operations misc_seq_ops = {
++static const struct seq_operations misc_seq_ops = {
+ .start = misc_seq_start,
+ .next = misc_seq_next,
+ .stop = misc_seq_stop,
+diff -urNp linux-2.6.30.8/drivers/char/mspec.c linux-2.6.30.8/drivers/char/mspec.c
+--- linux-2.6.30.8/drivers/char/mspec.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/mspec.c 2009-07-30 09:48:10.006662764 -0400
+@@ -239,7 +239,7 @@ mspec_fault(struct vm_area_struct *vma,
+ return VM_FAULT_NOPAGE;
+ }
+
+-static struct vm_operations_struct mspec_vm_ops = {
++static const struct vm_operations_struct mspec_vm_ops = {
+ .open = mspec_open,
+ .close = mspec_close,
+ .fault = mspec_fault,
+diff -urNp linux-2.6.30.8/drivers/char/nvram.c linux-2.6.30.8/drivers/char/nvram.c
+--- linux-2.6.30.8/drivers/char/nvram.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/nvram.c 2009-07-30 09:48:10.006662764 -0400
+@@ -429,7 +429,10 @@ static const struct file_operations nvra
+ static struct miscdevice nvram_dev = {
+ NVRAM_MINOR,
+ "nvram",
+- &nvram_fops
++ &nvram_fops,
++ {NULL, NULL},
++ NULL,
++ NULL
+ };
+
+ static int __init nvram_init(void)
+diff -urNp linux-2.6.30.8/drivers/char/random.c linux-2.6.30.8/drivers/char/random.c
+--- linux-2.6.30.8/drivers/char/random.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/random.c 2009-07-30 11:10:48.992521357 -0400
+@@ -253,8 +253,13 @@
+ /*
+ * Configuration information
+ */
++#ifdef CONFIG_GRKERNSEC_RANDNET
++#define INPUT_POOL_WORDS 512
++#define OUTPUT_POOL_WORDS 128
++#else
+ #define INPUT_POOL_WORDS 128
+ #define OUTPUT_POOL_WORDS 32
++#endif
+ #define SEC_XFER_SIZE 512
+
+ /*
+@@ -291,10 +296,17 @@ static struct poolinfo {
+ int poolwords;
+ int tap1, tap2, tap3, tap4, tap5;
+ } poolinfo_table[] = {
++#ifdef CONFIG_GRKERNSEC_RANDNET
++ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
++ { 512, 411, 308, 208, 104, 1 },
++ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
++ { 128, 103, 76, 51, 25, 1 },
++#else
+ /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
+ { 128, 103, 76, 51, 25, 1 },
+ /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
+ { 32, 26, 20, 14, 7, 1 },
++#endif
+ #if 0
+ /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
+ { 2048, 1638, 1231, 819, 411, 1 },
+@@ -1204,7 +1216,7 @@ EXPORT_SYMBOL(generate_random_uuid);
+ #include <linux/sysctl.h>
+
+ static int min_read_thresh = 8, min_write_thresh;
+-static int max_read_thresh = INPUT_POOL_WORDS * 32;
++static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
+ static int max_write_thresh = INPUT_POOL_WORDS * 32;
+ static char sysctl_bootid[16];
+
+diff -urNp linux-2.6.30.8/drivers/char/tpm/tpm_bios.c linux-2.6.30.8/drivers/char/tpm/tpm_bios.c
+--- linux-2.6.30.8/drivers/char/tpm/tpm_bios.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/tpm/tpm_bios.c 2009-07-30 09:48:10.007651841 -0400
+@@ -343,14 +343,14 @@ static int tpm_ascii_bios_measurements_s
+ return 0;
+ }
+
+-static struct seq_operations tpm_ascii_b_measurments_seqops = {
++static const struct seq_operations tpm_ascii_b_measurments_seqops = {
+ .start = tpm_bios_measurements_start,
+ .next = tpm_bios_measurements_next,
+ .stop = tpm_bios_measurements_stop,
+ .show = tpm_ascii_bios_measurements_show,
+ };
+
+-static struct seq_operations tpm_binary_b_measurments_seqops = {
++static const struct seq_operations tpm_binary_b_measurments_seqops = {
+ .start = tpm_bios_measurements_start,
+ .next = tpm_bios_measurements_next,
+ .stop = tpm_bios_measurements_stop,
+diff -urNp linux-2.6.30.8/drivers/char/tty_ldisc.c linux-2.6.30.8/drivers/char/tty_ldisc.c
+--- linux-2.6.30.8/drivers/char/tty_ldisc.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/tty_ldisc.c 2009-07-30 09:48:10.008436205 -0400
+@@ -73,7 +73,7 @@ int tty_register_ldisc(int disc, struct
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ tty_ldiscs[disc] = new_ldisc;
+ new_ldisc->num = disc;
+- new_ldisc->refcount = 0;
++ atomic_set(&new_ldisc->refcount, 0);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ return ret;
+@@ -101,7 +101,7 @@ int tty_unregister_ldisc(int disc)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+- if (tty_ldiscs[disc]->refcount)
++ if (atomic_read(&tty_ldiscs[disc]->refcount))
+ ret = -EBUSY;
+ else
+ tty_ldiscs[disc] = NULL;
+@@ -138,7 +138,7 @@ static int tty_ldisc_try_get(int disc, s
+ err = -EAGAIN;
+ else {
+ /* lock it */
+- ldops->refcount++;
++ atomic_inc(&ldops->refcount);
+ ld->ops = ldops;
+ err = 0;
+ }
+@@ -195,8 +195,8 @@ static void tty_ldisc_put(struct tty_ldi
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ ld = tty_ldiscs[disc];
+- BUG_ON(ld->refcount == 0);
+- ld->refcount--;
++ BUG_ON(atomic_read(&ld->refcount) == 0);
++ atomic_dec(&ld->refcount);
+ module_put(ld->owner);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ }
+@@ -263,7 +263,7 @@ const struct file_operations tty_ldiscs_
+
+ static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
+ {
+- ld->refcount = 0;
++ atomic_set(&ld->refcount, 0);
+ tty->ldisc = *ld;
+ }
+
+@@ -288,7 +288,7 @@ static int tty_ldisc_try(struct tty_stru
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ ld = &tty->ldisc;
+ if (test_bit(TTY_LDISC, &tty->flags)) {
+- ld->refcount++;
++ atomic_inc(&ld->refcount);
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+@@ -315,7 +315,7 @@ struct tty_ldisc *tty_ldisc_ref_wait(str
+ {
+ /* wait_event is a macro */
+ wait_event(tty_ldisc_wait, tty_ldisc_try(tty));
+- WARN_ON(tty->ldisc.refcount == 0);
++ WARN_ON(atomic_read(&tty->ldisc.refcount) == 0);
+ return &tty->ldisc;
+ }
+
+@@ -358,11 +358,9 @@ void tty_ldisc_deref(struct tty_ldisc *l
+ BUG_ON(ld == NULL);
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+- if (ld->refcount == 0)
++ if (!atomic_add_unless(&ld->refcount, -1, 0))
+ printk(KERN_ERR "tty_ldisc_deref: no references.\n");
+- else
+- ld->refcount--;
+- if (ld->refcount == 0)
++ if (atomic_read(&ld->refcount) == 0)
+ wake_up(&tty_ldisc_wait);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ }
+@@ -506,8 +504,8 @@ restart:
+ clear_bit(TTY_LDISC, &o_tty->flags);
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+- if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) {
+- if (tty->ldisc.refcount) {
++ if (atomic_read(&tty->ldisc.refcount) || (o_tty && atomic_read(&o_tty->ldisc.refcount))) {
++ if (atomic_read(&tty->ldisc.refcount)) {
+ /* Free the new ldisc we grabbed. Must drop the lock
+ first. */
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+@@ -519,14 +517,14 @@ restart:
+ * and retries if we made tty_ldisc_wait() smarter.
+ * That is up for discussion.
+ */
+- if (wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
++ if (wait_event_interruptible(tty_ldisc_wait, atomic_read(&tty->ldisc.refcount) == 0) < 0)
+ return -ERESTARTSYS;
+ goto restart;
+ }
+- if (o_tty && o_tty->ldisc.refcount) {
++ if (o_tty && atomic_read(&o_tty->ldisc.refcount)) {
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ tty_ldisc_put(o_tty->ldisc.ops);
+- if (wait_event_interruptible(tty_ldisc_wait, o_tty->ldisc.refcount == 0) < 0)
++ if (wait_event_interruptible(tty_ldisc_wait, atomic_read(&o_tty->ldisc.refcount) == 0) < 0)
+ return -ERESTARTSYS;
+ goto restart;
+ }
+@@ -669,9 +667,9 @@ void tty_ldisc_release(struct tty_struct
+ * side is zero.
+ */
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+- while (tty->ldisc.refcount) {
++ while (atomic_read(&tty->ldisc.refcount)) {
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+- wait_event(tty_ldisc_wait, tty->ldisc.refcount == 0);
++ wait_event(tty_ldisc_wait, atomic_read(&tty->ldisc.refcount) == 0);
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ }
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
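
This file converts the line-discipline refcount from a plain int to an atomic_t. The int was mutated under tty_ldisc_lock but also read in unlocked paths; the atomic form makes those reads well-defined, and tty_ldisc_deref collapses its check-then-decrement into one atomic_add_unless(&ld->refcount, -1, 0), i.e. "decrement unless already zero", which cannot underflow. A C11 userspace analogue of that primitive (the kernel's atomic_t API differs; this shows the pattern, not the implementation):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount = 1;

/* Decrement *v unless it is already 0; returns 1 on success.
 * Mirrors atomic_add_unless(&ld->refcount, -1, 0) in the hunk. */
static int dec_unless_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		/* on failure, 'old' is reloaded and we retry */
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return 1;
	}
	return 0;	/* already zero: report, never underflow */
}

int main(void)
{
	printf("deref #1: %s\n", dec_unless_zero(&refcount) ? "ok" : "no references");
	printf("deref #2: %s\n", dec_unless_zero(&refcount) ? "ok" : "no references");
	return 0;
}
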
+diff -urNp linux-2.6.30.8/drivers/char/vt_ioctl.c linux-2.6.30.8/drivers/char/vt_ioctl.c
+--- linux-2.6.30.8/drivers/char/vt_ioctl.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/vt_ioctl.c 2009-07-30 11:10:49.002716445 -0400
+@@ -96,6 +96,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
+ case KDSKBENT:
+ if (!perm)
+ return -EPERM;
++
++#ifdef CONFIG_GRKERNSEC
++ if (!capable(CAP_SYS_TTY_CONFIG))
++ return -EPERM;
++#endif
++
+ if (!i && v == K_NOSUCHMAP) {
+ /* deallocate map */
+ key_map = key_maps[s];
+@@ -236,6 +242,13 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
+ goto reterr;
+ }
+
++#ifdef CONFIG_GRKERNSEC
++ if (!capable(CAP_SYS_TTY_CONFIG)) {
++ ret = -EPERM;
++ goto reterr;
++ }
++#endif
++
+ q = func_table[i];
+ first_free = funcbufptr + (funcbufsize - funcbufleft);
+ for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
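
Both vt_ioctl hunks gate keymap and function-key modification behind CAP_SYS_TTY_CONFIG when CONFIG_GRKERNSEC is set, so merely holding permission on the console is no longer enough to rebind keys for later users. The pattern is an early capability check before any state changes; sketched below with capable() stubbed out (capable() and CAP_SYS_TTY_CONFIG are kernel APIs, stubbed so the sketch runs in userspace):

#include <errno.h>
#include <stdio.h>

#define CAP_SYS_TTY_CONFIG 26			/* kernel capability number */

static int capable(int cap) { (void)cap; return 0; }	/* unprivileged */

static int do_kdsk_ioctl_like(int perm)
{
	if (!perm)
		return -EPERM;
	/* grsec addition: permission on the tty is no longer enough */
	if (!capable(CAP_SYS_TTY_CONFIG))
		return -EPERM;
	/* ... modify the keymap ... */
	return 0;
}

int main(void)
{
	printf("ioctl returned %d (-EPERM is %d)\n",
	       do_kdsk_ioctl_like(1), -EPERM);
	return 0;
}
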
+diff -urNp linux-2.6.30.8/drivers/char/xilinx_hwicap/xilinx_hwicap.c linux-2.6.30.8/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+--- linux-2.6.30.8/drivers/char/xilinx_hwicap/xilinx_hwicap.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/char/xilinx_hwicap/xilinx_hwicap.c 2009-07-30 09:48:10.008436205 -0400
+@@ -559,7 +559,7 @@ static int hwicap_release(struct inode *
+ return status;
+ }
+
+-static struct file_operations hwicap_fops = {
++static const struct file_operations hwicap_fops = {
+ .owner = THIS_MODULE,
+ .write = hwicap_write,
+ .read = hwicap_read,
+diff -urNp linux-2.6.30.8/drivers/edac/edac_core.h linux-2.6.30.8/drivers/edac/edac_core.h
+--- linux-2.6.30.8/drivers/edac/edac_core.h 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/edac/edac_core.h 2009-07-30 09:48:10.008436205 -0400
+@@ -98,11 +98,11 @@ extern int edac_debug_level;
+
+ #else /* !CONFIG_EDAC_DEBUG */
+
+-#define debugf0( ... )
+-#define debugf1( ... )
+-#define debugf2( ... )
+-#define debugf3( ... )
+-#define debugf4( ... )
++#define debugf0( ... ) do {} while (0)
++#define debugf1( ... ) do {} while (0)
++#define debugf2( ... ) do {} while (0)
++#define debugf3( ... ) do {} while (0)
++#define debugf4( ... ) do {} while (0)
+
+ #endif /* !CONFIG_EDAC_DEBUG */
+
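
Redefining the disabled debugf* macros as do {} while (0) instead of an empty expansion keeps each call site a real single statement: the empty form leaves a bare ';' as the body of any enclosing if, which draws -Wempty-body warnings and reads ambiguously. A compilable illustration:

/* Why 'do {} while (0)' beats an empty expansion for no-op macros. */
#include <stdio.h>

#define debugf0_old(...)			/* old: expands to nothing */
#define debugf0(...) do {} while (0)		/* patched: a statement */

int main(void)
{
	int err = 1;

	if (err)
		debugf0("err=%d\n", err);	/* one well-formed statement */
	else
		printf("no error\n");
	return 0;
}
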
+diff -urNp linux-2.6.30.8/drivers/firmware/dmi_scan.c linux-2.6.30.8/drivers/firmware/dmi_scan.c
+--- linux-2.6.30.8/drivers/firmware/dmi_scan.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/firmware/dmi_scan.c 2009-07-30 09:48:10.009412825 -0400
+@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
+ }
+ }
+ else {
+- /*
+- * no iounmap() for that ioremap(); it would be a no-op, but
+- * it's so early in setup that sucker gets confused into doing
+- * what it shouldn't if we actually call it.
+- */
+ p = dmi_ioremap(0xF0000, 0x10000);
+ if (p == NULL)
+ goto error;
+diff -urNp linux-2.6.30.8/drivers/gpio/gpiolib.c linux-2.6.30.8/drivers/gpio/gpiolib.c
+--- linux-2.6.30.8/drivers/gpio/gpiolib.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/gpio/gpiolib.c 2009-07-30 09:48:10.009412825 -0400
+@@ -1244,7 +1244,7 @@ static int gpiolib_open(struct inode *in
+ return single_open(file, gpiolib_show, NULL);
+ }
+
+-static struct file_operations gpiolib_operations = {
++static const struct file_operations gpiolib_operations = {
+ .open = gpiolib_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+diff -urNp linux-2.6.30.8/drivers/gpu/drm/drm_drv.c linux-2.6.30.8/drivers/gpu/drm/drm_drv.c
+--- linux-2.6.30.8/drivers/gpu/drm/drm_drv.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/gpu/drm/drm_drv.c 2009-07-30 09:48:10.010417819 -0400
+@@ -425,7 +425,7 @@ int drm_ioctl(struct inode *inode, struc
+ char *kdata = NULL;
+
+ atomic_inc(&dev->ioctl_count);
+- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
+ ++file_priv->ioctl_count;
+
+ DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
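
atomic_inc_unchecked() is a grsecurity/PaX addition, not a mainline API: under PAX_REFCOUNT, ordinary atomic_t increments gain overflow detection, and pure statistics counters such as dev->counts[] are moved to the unchecked variant so a wrap there is tolerated rather than treated as a refcount attack. A userspace sketch of the checked/unchecked split (the detection here is simplified; PaX saturates and logs rather than aborting):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static void atomic_inc_checked(atomic_int *v)
{
	if (atomic_fetch_add(v, 1) == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
}

static void atomic_inc_unchecked_like(atomic_int *v)
{
	atomic_fetch_add(v, 1);	/* wrap tolerated: statistics only */
}

int main(void)
{
	atomic_int refs = 1, stat_ioctls = INT_MAX;

	atomic_inc_checked(&refs);		 /* fine */
	atomic_inc_unchecked_like(&stat_ioctls); /* wraps, by design */
	printf("refs=%d stats=%d\n",
	       atomic_load(&refs), atomic_load(&stat_ioctls));
	return 0;
}
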
+diff -urNp linux-2.6.30.8/drivers/gpu/drm/drm_fops.c linux-2.6.30.8/drivers/gpu/drm/drm_fops.c
+--- linux-2.6.30.8/drivers/gpu/drm/drm_fops.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/gpu/drm/drm_fops.c 2009-07-30 09:48:10.010417819 -0400
+@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
+
+ retcode = drm_open_helper(inode, filp, dev);
+ if (!retcode) {
+- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
+ spin_lock(&dev->count_lock);
+- if (!dev->open_count++) {
++ if (atomic_inc_return(&dev->open_count) == 1) {
+ spin_unlock(&dev->count_lock);
+ retcode = drm_setup(dev);
+ goto out;
+@@ -433,7 +433,7 @@ int drm_release(struct inode *inode, str
+
+ lock_kernel();
+
+- DRM_DEBUG("open_count = %d\n", dev->open_count);
++ DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
+
+ if (dev->driver->preclose)
+ dev->driver->preclose(dev, file_priv);
+@@ -445,7 +445,7 @@ int drm_release(struct inode *inode, str
+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->device),
+- dev->open_count);
++ atomic_read(&dev->open_count));
+
+ /* if the master has gone away we can't do anything with the lock */
+ if (file_priv->minor->master)
+@@ -522,9 +522,9 @@ int drm_release(struct inode *inode, str
+ * End inline drm_release
+ */
+
+- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
+ spin_lock(&dev->count_lock);
+- if (!--dev->open_count) {
++ if (atomic_dec_and_test(&dev->open_count)) {
+ if (atomic_read(&dev->ioctl_count)) {
+ DRM_ERROR("Device busy: %d\n",
+ atomic_read(&dev->ioctl_count));
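
Together with the drm_open hunk above, this converts dev->open_count to an atomic_t: the first-opener test !dev->open_count++ becomes atomic_inc_return(...) == 1 and the last-closer test !--dev->open_count becomes atomic_dec_and_test(...), so the counter no longer relies on count_lock for its own atomicity. A userspace analogue using C11 fetch-ops:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count = 0;

static void device_open(void)
{
	/* was: if (!dev->open_count++)  ->  atomic_inc_return(...) == 1 */
	if (atomic_fetch_add(&open_count, 1) + 1 == 1)
		puts("first open: one-time setup");
}

static void device_release(void)
{
	/* was: if (!--dev->open_count)  ->  atomic_dec_and_test(...) */
	if (atomic_fetch_sub(&open_count, 1) - 1 == 0)
		puts("last close: teardown");
}

int main(void)
{
	device_open();
	device_open();
	device_release();
	device_release();
	return 0;
}
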
+diff -urNp linux-2.6.30.8/drivers/gpu/drm/drm_lock.c linux-2.6.30.8/drivers/gpu/drm/drm_lock.c
+--- linux-2.6.30.8/drivers/gpu/drm/drm_lock.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/gpu/drm/drm_lock.c 2009-07-30 09:48:10.010417819 -0400
+@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
+ if (drm_lock_take(&master->lock, lock->context)) {
+ master->lock.file_priv = file_priv;
+ master->lock.lock_time = jiffies;
+- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
+ break; /* Got lock */
+ }
+
+@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
+ return -EINVAL;
+ }
+
+- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+ /* kernel_context_switch isn't used by any of the x86 drm
+ * modules but is required by the Sparc driver.
+diff -urNp linux-2.6.30.8/drivers/gpu/drm/drm_vm.c linux-2.6.30.8/drivers/gpu/drm/drm_vm.c
+--- linux-2.6.30.8/drivers/gpu/drm/drm_vm.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/gpu/drm/drm_vm.c 2009-07-30 09:48:10.011410038 -0400
+@@ -369,28 +369,28 @@ static int drm_vm_sg_fault(struct vm_are
+ }
+
+ /** AGP virtual memory operations */
+-static struct vm_operations_struct drm_vm_ops = {
++static const struct vm_operations_struct drm_vm_ops = {
+ .fault = drm_vm_fault,
+ .open = drm_vm_open,
+ .close = drm_vm_close,
+ };
+
+ /** Shared virtual memory operations */
+-static struct vm_operations_struct drm_vm_shm_ops = {
++static const struct vm_operations_struct drm_vm_shm_ops = {
+ .fault = drm_vm_shm_fault,
+ .open = drm_vm_open,
+ .close = drm_vm_shm_close,
+ };
+
+ /** DMA virtual memory operations */
+-static struct vm_operations_struct drm_vm_dma_ops = {
++static const struct vm_operations_struct drm_vm_dma_ops = {
+ .fault = drm_vm_dma_fault,
+ .open = drm_vm_open,
+ .close = drm_vm_close,
+ };
+
+ /** Scatter-gather virtual memory operations */
+-static struct vm_operations_struct drm_vm_sg_ops = {
++static const struct vm_operations_struct drm_vm_sg_ops = {
+ .fault = drm_vm_sg_fault,
+ .open = drm_vm_open,
+ .close = drm_vm_close,
+diff -urNp linux-2.6.30.8/drivers/gpu/drm/i810/i810_dma.c linux-2.6.30.8/drivers/gpu/drm/i810/i810_dma.c
+--- linux-2.6.30.8/drivers/gpu/drm/i810/i810_dma.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/gpu/drm/i810/i810_dma.c 2009-07-30 09:48:10.011410038 -0400
+@@ -954,8 +954,8 @@ static int i810_dma_vertex(struct drm_de
+ dma->buflist[vertex->idx],
+ vertex->discard, vertex->used);
+
+- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
+- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+ sarea_priv->last_enqueue = dev_priv->counter - 1;
+ sarea_priv->last_dispatch = (int)hw_status[5];
+
+@@ -1117,8 +1117,8 @@ static int i810_dma_mc(struct drm_device
+ i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
+ mc->last_render);
+
+- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
+- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+ sarea_priv->last_enqueue = dev_priv->counter - 1;
+ sarea_priv->last_dispatch = (int)hw_status[5];
+
+diff -urNp linux-2.6.30.8/drivers/gpu/drm/i915/i915_drv.c linux-2.6.30.8/drivers/gpu/drm/i915/i915_drv.c
+--- linux-2.6.30.8/drivers/gpu/drm/i915/i915_drv.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/gpu/drm/i915/i915_drv.c 2009-07-30 12:07:09.579971370 -0400
+@@ -149,7 +149,7 @@ i915_pci_resume(struct pci_dev *pdev)
+ return i915_resume(dev);
+ }
+
+-static struct vm_operations_struct i915_gem_vm_ops = {
++static const struct vm_operations_struct i915_gem_vm_ops = {
+ .fault = i915_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+diff -urNp linux-2.6.30.8/drivers/hwmon/fschmd.c linux-2.6.30.8/drivers/hwmon/fschmd.c
+--- linux-2.6.30.8/drivers/hwmon/fschmd.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/hwmon/fschmd.c 2009-07-30 09:48:10.011410038 -0400
+@@ -915,7 +915,7 @@ static int watchdog_ioctl(struct inode *
+ return ret;
+ }
+
+-static struct file_operations watchdog_fops = {
++static const struct file_operations watchdog_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = watchdog_open,
+diff -urNp linux-2.6.30.8/drivers/hwmon/fscpos.c linux-2.6.30.8/drivers/hwmon/fscpos.c
+--- linux-2.6.30.8/drivers/hwmon/fscpos.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/hwmon/fscpos.c 2009-07-30 09:48:10.015465337 -0400
+@@ -240,7 +240,6 @@ static ssize_t set_pwm(struct i2c_client
+ unsigned long v = simple_strtoul(buf, NULL, 10);
+
+ /* Range: 0..255 */
+- if (v < 0) v = 0;
+ if (v > 255) v = 255;
+
+ mutex_lock(&data->update_lock);
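
simple_strtoul() returns an unsigned long, so the removed 'if (v < 0) v = 0;' could never fire; only the upper clamp does anything. A self-contained illustration (strtoul stands in for the kernel helper):

/* An unsigned value is never negative, so the lower clamp was dead
 * code (gcc -Wtype-limits points this out). */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long v = strtoul("300", NULL, 10);	/* range: 0..255 */

	/* if (v < 0) v = 0;   -- always false for unsigned long */
	if (v > 255)
		v = 255;

	printf("pwm = %lu\n", v);
	return 0;
}
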
+diff -urNp linux-2.6.30.8/drivers/hwmon/k8temp.c linux-2.6.30.8/drivers/hwmon/k8temp.c
+--- linux-2.6.30.8/drivers/hwmon/k8temp.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/hwmon/k8temp.c 2009-07-30 09:48:10.016410845 -0400
+@@ -138,7 +138,7 @@ static DEVICE_ATTR(name, S_IRUGO, show_n
+
+ static struct pci_device_id k8temp_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
+- { 0 },
++ { 0, 0, 0, 0, 0, 0, 0 },
+ };
+
+ MODULE_DEVICE_TABLE(pci, k8temp_ids);
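
Here and in the sis5595/via686a/vt8231/i2c-i801/i2c-piix4/i2c-sis630/i2c-sis96x tables below, the terminating '{ 0 }' / '{ 0, }' sentinel is spelled out with one zero per field of struct pci_device_id (the i2c-piix4 dmi_system_id table gets an analogous explicit terminator). Both forms zero-initialize identically; the explicit form simply builds clean under -Wmissing-field-initializers. Sketch with a stand-in struct mirroring the seven pci_device_id fields:

#include <stdio.h>

struct pci_id_like {
	unsigned int vendor, device, subvendor, subdevice;
	unsigned int class, class_mask;
	unsigned long driver_data;
};

static const struct pci_id_like ids[] = {
	{ 0x1022, 0x1103, ~0u, ~0u, 0, 0, 0 },	/* AMD K8 NB misc */
	{ 0, 0, 0, 0, 0, 0, 0 },		/* explicit zero terminator */
};

int main(void)
{
	for (const struct pci_id_like *p = ids; p->vendor; p++)
		printf("match %04x:%04x\n", p->vendor, p->device);
	return 0;
}
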
+diff -urNp linux-2.6.30.8/drivers/hwmon/sis5595.c linux-2.6.30.8/drivers/hwmon/sis5595.c
+--- linux-2.6.30.8/drivers/hwmon/sis5595.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/hwmon/sis5595.c 2009-07-30 09:48:10.016410845 -0400
+@@ -699,7 +699,7 @@ static struct sis5595_data *sis5595_upda
+
+ static struct pci_device_id sis5595_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(pci, sis5595_pci_ids);
+diff -urNp linux-2.6.30.8/drivers/hwmon/via686a.c linux-2.6.30.8/drivers/hwmon/via686a.c
+--- linux-2.6.30.8/drivers/hwmon/via686a.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/hwmon/via686a.c 2009-07-30 09:48:10.016410845 -0400
+@@ -769,7 +769,7 @@ static struct via686a_data *via686a_upda
+
+ static struct pci_device_id via686a_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(pci, via686a_pci_ids);
+diff -urNp linux-2.6.30.8/drivers/hwmon/vt8231.c linux-2.6.30.8/drivers/hwmon/vt8231.c
+--- linux-2.6.30.8/drivers/hwmon/vt8231.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/hwmon/vt8231.c 2009-07-30 09:48:10.017409539 -0400
+@@ -699,7 +699,7 @@ static struct platform_driver vt8231_dri
+
+ static struct pci_device_id vt8231_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE(pci, vt8231_pci_ids);
+diff -urNp linux-2.6.30.8/drivers/hwmon/w83791d.c linux-2.6.30.8/drivers/hwmon/w83791d.c
+--- linux-2.6.30.8/drivers/hwmon/w83791d.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/hwmon/w83791d.c 2009-07-30 09:48:10.017409539 -0400
+@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
+ struct i2c_board_info *info);
+ static int w83791d_remove(struct i2c_client *client);
+
+-static int w83791d_read(struct i2c_client *client, u8 register);
+-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
++static int w83791d_read(struct i2c_client *client, u8 reg);
++static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
+ static struct w83791d_data *w83791d_update_device(struct device *dev);
+
+ #ifdef DEBUG
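
'register' is a reserved keyword, so these forward declarations never declared a parameter *named* register: 'u8 register' parses as an unnamed u8 parameter carrying the register storage class, which compiles but is clearly not what was meant and confuses static checkers. The patch renames the prototype parameters to 'reg' to match the definitions. Illustration (w83791d_read_like is a hypothetical stand-in):

/* 'u8 register' = unnamed u8 parameter with register storage class,
 * not a parameter named 'register'; spelling it 'reg' says what was
 * meant. */
typedef unsigned char u8;
struct i2c_client;

static int w83791d_read_like(struct i2c_client *client, u8 reg);

static int w83791d_read_like(struct i2c_client *client, u8 reg)
{
	(void)client;
	return reg;	/* stub */
}

int main(void)
{
	return w83791d_read_like(0, 0);
}
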
+diff -urNp linux-2.6.30.8/drivers/i2c/busses/i2c-i801.c linux-2.6.30.8/drivers/i2c/busses/i2c-i801.c
+--- linux-2.6.30.8/drivers/i2c/busses/i2c-i801.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/i2c/busses/i2c-i801.c 2009-07-30 09:48:10.018424106 -0400
+@@ -578,7 +578,7 @@ static struct pci_device_id i801_ids[] =
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE (pci, i801_ids);
+diff -urNp linux-2.6.30.8/drivers/i2c/busses/i2c-piix4.c linux-2.6.30.8/drivers/i2c/busses/i2c-piix4.c
+--- linux-2.6.30.8/drivers/i2c/busses/i2c-piix4.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/i2c/busses/i2c-piix4.c 2009-07-30 09:48:10.018424106 -0400
+@@ -123,7 +123,7 @@ static struct dmi_system_id __devinitdat
+ .ident = "IBM",
+ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
+ },
+- { },
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL },
+ };
+
+ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
+@@ -489,7 +489,7 @@ static struct pci_device_id piix4_ids[]
+ PCI_DEVICE_ID_SERVERWORKS_HT1000SB) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_HT1100LD) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE (pci, piix4_ids);
+diff -urNp linux-2.6.30.8/drivers/i2c/busses/i2c-sis630.c linux-2.6.30.8/drivers/i2c/busses/i2c-sis630.c
+--- linux-2.6.30.8/drivers/i2c/busses/i2c-sis630.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/i2c/busses/i2c-sis630.c 2009-07-30 09:48:10.018424106 -0400
+@@ -471,7 +471,7 @@ static struct i2c_adapter sis630_adapter
+ static struct pci_device_id sis630_ids[] __devinitdata = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE (pci, sis630_ids);
+diff -urNp linux-2.6.30.8/drivers/i2c/busses/i2c-sis96x.c linux-2.6.30.8/drivers/i2c/busses/i2c-sis96x.c
+--- linux-2.6.30.8/drivers/i2c/busses/i2c-sis96x.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/i2c/busses/i2c-sis96x.c 2009-07-30 09:48:10.018424106 -0400
+@@ -247,7 +247,7 @@ static struct i2c_adapter sis96x_adapter
+
+ static struct pci_device_id sis96x_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
+- { 0, }
++ { 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ MODULE_DEVICE_TABLE (pci, sis96x_ids);
+diff -urNp linux-2.6.30.8/drivers/ieee1394/dma.c linux-2.6.30.8/drivers/ieee1394/dma.c
+--- linux-2.6.30.8/drivers/ieee1394/dma.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/ieee1394/dma.c 2009-07-30 09:48:10.018424106 -0400
+@@ -247,7 +247,7 @@ static int dma_region_pagefault(struct v
+ return 0;
+ }
+
+-static struct vm_operations_struct dma_region_vm_ops = {
++static const struct vm_operations_struct dma_region_vm_ops = {
+ .fault = dma_region_pagefault,
+ };
+
+diff -urNp linux-2.6.30.8/drivers/ieee1394/dv1394.c linux-2.6.30.8/drivers/ieee1394/dv1394.c
+--- linux-2.6.30.8/drivers/ieee1394/dv1394.c 2009-07-24 17:47:51.000000000 -0400
++++ linux-2.6.30.8/drivers/ieee1394/dv1394.c 2009-07-30 09:48:10.020336753 -0400
+@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
+ based upon DIF section and sequence
+ */
+
+-static void inline
++static inline void
+ frame_put_packet (struct frame *f, struct packet *p)
+ {
+ int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
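
The 'static void inline' to 'static inline void' change above is specifier ordering only: the two forms are equivalent, but gcc warns when inline trails the return type (-Wold-style-declaration, enabled by -Wextra) and the kernel's checkpatch.pl flags it as well. Both spellings compile today:

/* Equivalent definitions; only the second matches kernel style. */
static void inline old_style(void) { }
static inline void new_style(void) { }

int main(void)
{
	old_style();
	new_style();
	return 0;
}
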
+@@ -2177,7 +2177,7 @@ static const struct ieee1394_device_id d