Diffstat (limited to 'main/linux-virt-grsec/grsecurity-2.9.1-3.9.11-unofficial-1.patch')
-rw-r--r-- | main/linux-virt-grsec/grsecurity-2.9.1-3.9.11-unofficial-1.patch | 105215
1 file changed, 105215 insertions, 0 deletions
diff --git a/main/linux-virt-grsec/grsecurity-2.9.1-3.9.11-unofficial-1.patch b/main/linux-virt-grsec/grsecurity-2.9.1-3.9.11-unofficial-1.patch new file mode 100644 index 000000000..932805c95 --- /dev/null +++ b/main/linux-virt-grsec/grsecurity-2.9.1-3.9.11-unofficial-1.patch @@ -0,0 +1,105215 @@ +diff --git a/Documentation/dontdiff b/Documentation/dontdiff +index b89a739..79768fb 100644 +--- a/Documentation/dontdiff ++++ b/Documentation/dontdiff +@@ -2,9 +2,11 @@ + *.aux + *.bin + *.bz2 ++*.c.[012]*.* + *.cis + *.cpio + *.csp ++*.dbg + *.dsp + *.dvi + *.elf +@@ -14,6 +16,7 @@ + *.gcov + *.gen.S + *.gif ++*.gmo + *.grep + *.grp + *.gz +@@ -48,14 +51,17 @@ + *.tab.h + *.tex + *.ver ++*.vim + *.xml + *.xz + *_MODULES ++*_reg_safe.h + *_vga16.c + *~ + \#*# + *.9 +-.* ++.[^g]* ++.gen* + .*.d + .mm + 53c700_d.h +@@ -69,9 +75,11 @@ Image + Module.markers + Module.symvers + PENDING ++PERF* + SCCS + System.map* + TAGS ++TRACEEVENT-CFLAGS + aconf + af_names.h + aic7*reg.h* +@@ -80,6 +88,7 @@ aic7*seq.h* + aicasm + aicdb.h* + altivec*.c ++ashldi3.S + asm-offsets.h + asm_offsets.h + autoconf.h* +@@ -92,19 +101,24 @@ bounds.h + bsetup + btfixupprep + build ++builtin-policy.h + bvmlinux + bzImage* + capability_names.h + capflags.c + classlist.h* ++clut_vga16.c ++common-cmds.h + comp*.log + compile.h* + conf + config + config-* + config_data.h* ++config.c + config.mak + config.mak.autogen ++config.tmp + conmakehash + consolemap_deftbl.c* + cpustr.h +@@ -115,9 +129,11 @@ devlist.h* + dnotify_test + docproc + dslm ++dtc-lexer.lex.c + elf2ecoff + elfconfig.h* + evergreen_reg_safe.h ++exception_policy.conf + fixdep + flask.h + fore200e_mkfirm +@@ -125,12 +141,15 @@ fore200e_pca_fw.c* + gconf + gconf.glade.h + gen-devlist ++gen-kdb_cmds.c + gen_crc32table + gen_init_cpio + generated + genheaders + genksyms + *_gray256.c ++hash ++hid-example + hpet_example + hugepage-mmap + hugepage-shm +@@ -145,14 +164,14 @@ int32.c + int4.c + int8.c + kallsyms +-kconfig ++kern_constants.h + keywords.c + ksym.c* + ksym.h* + kxgettext + lex.c + lex.*.c +-linux ++lib1funcs.S + logo_*.c + logo_*_clut224.c + logo_*_mono.c +@@ -162,14 +181,15 @@ mach-types.h + machtypes.h + map + map_hugetlb +-media + mconf ++mdp + miboot* + mk_elfconfig + mkboot + mkbugboot + mkcpustr + mkdep ++mkpiggy + mkprep + mkregtable + mktables +@@ -185,6 +205,8 @@ oui.c* + page-types + parse.c + parse.h ++parse-events* ++pasyms.h + patches* + pca200e.bin + pca200e_ecd.bin2 +@@ -194,6 +216,7 @@ perf-archive + piggyback + piggy.gzip + piggy.S ++pmu-* + pnmtologo + ppc_defs.h* + pss_boot.h +@@ -203,7 +226,10 @@ r200_reg_safe.h + r300_reg_safe.h + r420_reg_safe.h + r600_reg_safe.h ++realmode.lds ++realmode.relocs + recordmcount ++regdb.c + relocs + rlim_names.h + rn50_reg_safe.h +@@ -213,8 +239,12 @@ series + setup + setup.bin + setup.elf ++signing_key* ++size_overflow_hash.h + sImage ++slabinfo + sm_tbl* ++sortextable + split-include + syscalltab.h + tables.c +@@ -224,6 +254,7 @@ tftpboot.img + timeconst.h + times.h* + trix_boot.h ++user_constants.h + utsrelease.h* + vdso-syms.lds + vdso.lds +@@ -235,13 +266,17 @@ vdso32.lds + vdso32.so.dbg + vdso64.lds + vdso64.so.dbg ++vdsox32.lds ++vdsox32-syms.lds + version.h* + vmImage + vmlinux + vmlinux-* + vmlinux.aout + vmlinux.bin.all ++vmlinux.bin.bz2 + vmlinux.lds ++vmlinux.relocs + vmlinuz + voffset.h + vsyscall.lds +@@ -249,9 +284,12 @@ vsyscall_32.lds + wanxlfw.inc + uImage + unifdef ++utsrelease.h + wakeup.bin + wakeup.elf + wakeup.lds ++x509* + zImage* + zconf.hash.c ++zconf.lex.c + zoffset.h +diff --git 
a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index 8ccbf27..afffeb4 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -948,6 +948,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0. + Default: 1024 + ++ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to ++ ignore grsecurity's /proc restrictions ++ ++ + hashdist= [KNL,NUMA] Large hashes allocated during boot + are distributed across NUMA nodes. Defaults on + for 64-bit NUMA, off otherwise. +@@ -2147,6 +2151,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + the specified number of seconds. This is to be used if + your oopses keep scrolling off the screen. + ++ pax_nouderef [X86] disables UDEREF. Most likely needed under certain ++ virtualization environments that don't cope well with the ++ expand down segment used by UDEREF on X86-32 or the frequent ++ page table updates on X86-64. ++ ++ pax_softmode= 0/1 to disable/enable PaX softmode on boot already. ++ ++ pax_extra_latent_entropy ++ Enable a very simple form of latent entropy extraction ++ from the first 4GB of memory as the bootmem allocator ++ passes the memory pages to the buddy allocator. ++ + pcbit= [HW,ISDN] + + pcd. [PARIDE] +diff --git a/Makefile b/Makefile +index ad368cd..96b21c3 100644 +--- a/Makefile ++++ b/Makefile +@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ + + HOSTCC = gcc + HOSTCXX = g++ +-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer +-HOSTCXXFLAGS = -O2 ++HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks ++HOSTCFLAGS += $(call cc-option, -Wno-empty-body) ++HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks + + # Decide whether to build built-in, modular, or both. + # Normally, just do built-in. 
+@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \ + # Rules shared between *config targets and build targets + + # Basic helpers built in scripts/ +-PHONY += scripts_basic +-scripts_basic: ++PHONY += scripts_basic gcc-plugins ++scripts_basic: gcc-plugins + $(Q)$(MAKE) $(build)=scripts/basic + $(Q)rm -f .tmp_quiet_recordmcount + +@@ -576,6 +577,65 @@ else + KBUILD_CFLAGS += -O2 + endif + ++ifndef DISABLE_PAX_PLUGINS ++ifeq ($(call cc-ifversion, -ge, 0408, y), y) ++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)") ++else ++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)") ++endif ++ifneq ($(PLUGINCC),) ++ifdef CONFIG_PAX_CONSTIFY_PLUGIN ++CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN ++endif ++ifdef CONFIG_PAX_MEMORY_STACKLEAK ++STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN ++STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100 ++endif ++ifdef CONFIG_KALLOCSTAT_PLUGIN ++KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so ++endif ++ifdef CONFIG_PAX_KERNEXEC_PLUGIN ++KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so ++KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN ++KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN ++endif ++ifdef CONFIG_CHECKER_PLUGIN ++ifeq ($(call cc-ifversion, -ge, 0406, y), y) ++CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN ++endif ++endif ++COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so ++ifdef CONFIG_PAX_SIZE_OVERFLOW ++SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN ++endif ++ifdef CONFIG_PAX_LATENT_ENTROPY ++LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN ++endif ++ifdef CONFIG_PAX_MEMORY_STRUCTLEAK ++STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN ++endif ++GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) ++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) ++GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS) ++GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS) ++export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN ++ifeq ($(KBUILD_EXTMOD),) ++gcc-plugins: ++ $(Q)$(MAKE) $(build)=tools/gcc ++else ++gcc-plugins: ; ++endif ++else ++gcc-plugins: ++ifeq ($(call cc-ifversion, -ge, 0405, y), y) ++ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)) ++else ++ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least" ++endif ++ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active." 
++endif ++endif ++ + include $(srctree)/arch/$(SRCARCH)/Makefile + + ifdef CONFIG_READABLE_ASM +@@ -733,7 +793,7 @@ export mod_sign_cmd + + + ifeq ($(KBUILD_EXTMOD),) +-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ ++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ + + vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ + $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ +@@ -780,6 +840,8 @@ endif + + # The actual objects are generated when descending, + # make sure no implicit rule kicks in ++$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) + $(sort $(vmlinux-deps)): $(vmlinux-dirs) ; + + # Handle descending into subdirectories listed in $(vmlinux-dirs) +@@ -789,7 +851,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ; + # Error messages still appears in the original language + + PHONY += $(vmlinux-dirs) +-$(vmlinux-dirs): prepare scripts ++$(vmlinux-dirs): gcc-plugins prepare scripts + $(Q)$(MAKE) $(build)=$@ + + # Store (new) KERNELRELASE string in include/config/kernel.release +@@ -833,6 +895,7 @@ prepare0: archprepare FORCE + $(Q)$(MAKE) $(build)=. + + # All the preparing.. ++prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) + prepare: prepare0 + + # Generate some files +@@ -940,6 +1003,8 @@ all: modules + # using awk while concatenating to the final file. + + PHONY += modules ++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) + modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin + $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order + @$(kecho) ' Building modules, stage 2.'; +@@ -955,7 +1020,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin) + + # Target to prepare building external modules + PHONY += modules_prepare +-modules_prepare: prepare scripts ++modules_prepare: gcc-plugins prepare scripts + + # Target to install modules + PHONY += modules_install +@@ -1021,7 +1086,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \ + Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \ + signing_key.priv signing_key.x509 x509.genkey \ + extra_certificates signing_key.x509.keyid \ +- signing_key.x509.signer ++ signing_key.x509.signer tools/gcc/size_overflow_hash.h + + # clean - Delete most, but leave enough to build external modules + # +@@ -1061,6 +1126,7 @@ distclean: mrproper + \( -name '*.orig' -o -name '*.rej' -o -name '*~' \ + -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \ + -o -name '.*.rej' \ ++ -o -name '.*.rej' -o -name '*.so' \ + -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \ + -type f -print | xargs rm -f + +@@ -1221,6 +1287,8 @@ PHONY += $(module-dirs) modules + $(module-dirs): crmodverdir $(objtree)/Module.symvers + $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@) + ++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) + modules: $(module-dirs) + @$(kecho) ' Building modules, stage 2.'; + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost +@@ -1357,17 +1425,21 @@ else + target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@)) + endif + +-%.s: %.c prepare scripts FORCE ++%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) ++%.s: %.c gcc-plugins prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) + %.i: %.c prepare scripts FORCE 
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +-%.o: %.c prepare scripts FORCE ++%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) ++%.o: %.c gcc-plugins prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) + %.lst: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +-%.s: %.S prepare scripts FORCE ++%.s: %.S gcc-plugins prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +-%.o: %.S prepare scripts FORCE ++%.o: %.S gcc-plugins prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) + %.symtypes: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +@@ -1377,11 +1449,15 @@ endif + $(cmd_crmodverdir) + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ + $(build)=$(build-dir) +-%/: prepare scripts FORCE ++%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) ++%/: gcc-plugins prepare scripts FORCE + $(cmd_crmodverdir) + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ + $(build)=$(build-dir) +-%.ko: prepare scripts FORCE ++%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) ++%.ko: gcc-plugins prepare scripts FORCE + $(cmd_crmodverdir) + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ + $(build)=$(build-dir) $(@:.ko=.o) +diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h +index c2cbe4f..f7264b4 100644 +--- a/arch/alpha/include/asm/atomic.h ++++ b/arch/alpha/include/asm/atomic.h +@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) + #define atomic_dec(v) atomic_sub(1,(v)) + #define atomic64_dec(v) atomic64_sub(1,(v)) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #define smp_mb__before_atomic_dec() smp_mb() + #define smp_mb__after_atomic_dec() smp_mb() + #define smp_mb__before_atomic_inc() smp_mb() +diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h +index ad368a9..fbe0f25 100644 +--- a/arch/alpha/include/asm/cache.h ++++ b/arch/alpha/include/asm/cache.h +@@ -4,19 +4,19 @@ + #ifndef __ARCH_ALPHA_CACHE_H + #define __ARCH_ALPHA_CACHE_H + ++#include <linux/const.h> + + /* Bytes per L1 (data) cache line. */ + #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6) +-# define L1_CACHE_BYTES 64 + # define L1_CACHE_SHIFT 6 + #else + /* Both EV4 and EV5 are write-through, read-allocate, + direct-mapped, physical. 
+ */ +-# define L1_CACHE_BYTES 32 + # define L1_CACHE_SHIFT 5 + #endif + ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + #define SMP_CACHE_BYTES L1_CACHE_BYTES + + #endif +diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h +index 968d999..d36b2df 100644 +--- a/arch/alpha/include/asm/elf.h ++++ b/arch/alpha/include/asm/elf.h +@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL) ++ ++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28) ++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19) ++#endif ++ + /* $0 is set by ld.so to a pointer to a function which might be + registered using atexit. This provides a mean for the dynamic + linker to call DT_FINI functions for shared libraries that have +diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h +index bc2a0da..8ad11ee 100644 +--- a/arch/alpha/include/asm/pgalloc.h ++++ b/arch/alpha/include/asm/pgalloc.h +@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) + pgd_set(pgd, pmd); + } + ++static inline void ++pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) ++{ ++ pgd_populate(mm, pgd, pmd); ++} ++ + extern pgd_t *pgd_alloc(struct mm_struct *mm); + + static inline void +diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h +index 81a4342..348b927 100644 +--- a/arch/alpha/include/asm/pgtable.h ++++ b/arch/alpha/include/asm/pgtable.h +@@ -102,6 +102,17 @@ struct vm_area_struct; + #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) + #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) + #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE) ++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) ++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) ++#else ++# define PAGE_SHARED_NOEXEC PAGE_SHARED ++# define PAGE_COPY_NOEXEC PAGE_COPY ++# define PAGE_READONLY_NOEXEC PAGE_READONLY ++#endif ++ + #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) + + #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) +diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c +index 2fd00b7..cfd5069 100644 +--- a/arch/alpha/kernel/module.c ++++ b/arch/alpha/kernel/module.c +@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, + + /* The small sections were sorted to the end of the segment. + The following should definitely cover them. */ +- gp = (u64)me->module_core + me->core_size - 0x8000; ++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000; + got = sechdrs[me->arch.gotsecindex].sh_addr; + + for (i = 0; i < n; i++) { +diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c +index b9e37ad..44c24e7 100644 +--- a/arch/alpha/kernel/osf_sys.c ++++ b/arch/alpha/kernel/osf_sys.c +@@ -1297,10 +1297,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) + generic version except that we know how to honor ADDR_LIMIT_32BIT. 
*/ + + static unsigned long +-arch_get_unmapped_area_1(unsigned long addr, unsigned long len, +- unsigned long limit) ++arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len, ++ unsigned long limit, unsigned long flags) + { + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); + + info.flags = 0; + info.length = len; +@@ -1308,6 +1309,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len, + info.high_limit = limit; + info.align_mask = 0; + info.align_offset = 0; ++ info.threadstack_offset = offset; + return vm_unmapped_area(&info); + } + +@@ -1340,20 +1342,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + merely specific addresses, but regions of memory -- perhaps + this feature should be incorporated into all ports? */ + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { +- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); ++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags); + if (addr != (unsigned long) -ENOMEM) + return addr; + } + + /* Next, try allocating at TASK_UNMAPPED_BASE. */ +- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), +- len, limit); ++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags); ++ + if (addr != (unsigned long) -ENOMEM) + return addr; + + /* Finally, try allocating in low memory. */ +- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); ++ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags); + + return addr; + } +diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c +index 0c4132d..88f0d53 100644 +--- a/arch/alpha/mm/fault.c ++++ b/arch/alpha/mm/fault.c +@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm) + __reload_thread(pcb); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++/* ++ * PaX: decide what to do with offenders (regs->pc = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when patched PLT trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: patched PLT emulation #1 */ ++ unsigned int ldah, ldq, jmp; ++ ++ err = get_user(ldah, (unsigned int *)regs->pc); ++ err |= get_user(ldq, (unsigned int *)(regs->pc+4)); ++ err |= get_user(jmp, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((ldah & 0xFFFF0000U) == 0x277B0000U && ++ (ldq & 0xFFFF0000U) == 0xA77B0000U && ++ jmp == 0x6BFB0000U) ++ { ++ unsigned long r27, addr; ++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; ++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL; ++ ++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); ++ err = get_user(r27, (unsigned long *)addr); ++ if (err) ++ break; ++ ++ regs->r27 = r27; ++ regs->pc = r27; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #2 */ ++ unsigned int ldah, lda, br; ++ ++ err = get_user(ldah, (unsigned int *)regs->pc); ++ err |= get_user(lda, (unsigned int *)(regs->pc+4)); ++ err |= get_user(br, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((ldah & 0xFFFF0000U) == 0x277B0000U && ++ (lda & 0xFFFF0000U) == 0xA77B0000U && ++ (br & 0xFFE00000U) == 0xC3E00000U) ++ { ++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL; ++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 
16; ++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL; ++ ++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); ++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation */ ++ unsigned int br; ++ ++ err = get_user(br, (unsigned int *)regs->pc); ++ ++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) { ++ unsigned int br2, ldq, nop, jmp; ++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver; ++ ++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); ++ err = get_user(br2, (unsigned int *)addr); ++ err |= get_user(ldq, (unsigned int *)(addr+4)); ++ err |= get_user(nop, (unsigned int *)(addr+8)); ++ err |= get_user(jmp, (unsigned int *)(addr+12)); ++ err |= get_user(resolver, (unsigned long *)(addr+16)); ++ ++ if (err) ++ break; ++ ++ if (br2 == 0xC3600000U && ++ ldq == 0xA77B000CU && ++ nop == 0x47FF041FU && ++ jmp == 0x6B7B0000U) ++ { ++ regs->r28 = regs->pc+4; ++ regs->r27 = addr+16; ++ regs->pc = resolver; ++ return 3; ++ } ++ } ++ } while (0); ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif + + /* + * This routine handles page faults. It determines the address, +@@ -133,8 +251,29 @@ retry: + good_area: + si_code = SEGV_ACCERR; + if (cause < 0) { +- if (!(vma->vm_flags & VM_EXEC)) ++ if (!(vma->vm_flags & VM_EXEC)) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc) ++ goto bad_area; ++ ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 2: ++ case 3: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp()); ++ do_group_exit(SIGKILL); ++#else + goto bad_area; ++#endif ++ ++ } + } else if (!cause) { + /* Allow reads even for write-only mappings */ + if (!(vma->vm_flags & (VM_READ | VM_WRITE))) +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig +index 70cd012..71b82cd 100644 +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -1860,7 +1860,7 @@ config ALIGNMENT_TRAP + + config UACCESS_WITH_MEMCPY + bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()" +- depends on MMU ++ depends on MMU && !PAX_MEMORY_UDEREF + default y if CPU_FEROCEON + help + Implement faster copy_to_user and clear_user methods for CPU +diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h +index c79f61f..9ac0642 100644 +--- a/arch/arm/include/asm/atomic.h ++++ b/arch/arm/include/asm/atomic.h +@@ -17,17 +17,35 @@ + #include <asm/barrier.h> + #include <asm/cmpxchg.h> + ++#ifdef CONFIG_GENERIC_ATOMIC64 ++#include <asm-generic/atomic64.h> ++#endif ++ + #define ATOMIC_INIT(i) { (i) } + + #ifdef __KERNEL__ + ++#define _ASM_EXTABLE(from, to) \ ++" .pushsection __ex_table,\"a\"\n"\ ++" .align 3\n" \ ++" .long " #from ", " #to"\n" \ ++" .popsection" ++ + /* + * On ARM, ordinary assignment (str instruction) doesn't clear the local + * strex/ldrex monitor on some implementations. The reason we can use it for + * atomic_set() is the clrex or dummy strex done on every exception return. 
+ */ + #define atomic_read(v) (*(volatile int *)&(v)->counter) ++static inline int atomic_read_unchecked(const atomic_unchecked_t *v) ++{ ++ return v->counter; ++} + #define atomic_set(v,i) (((v)->counter) = (i)) ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) ++{ ++ v->counter = i; ++} + + #if __LINUX_ARM_ARCH__ >= 6 + +@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v) + int result; + + __asm__ __volatile__("@ atomic_add\n" ++"1: ldrex %1, [%3]\n" ++" adds %0, %1, %4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ ++" strex %1, %0, [%3]\n" ++" teq %1, #0\n" ++" bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ ++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) ++ : "r" (&v->counter), "Ir" (i) ++ : "cc"); ++} ++ ++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) ++{ ++ unsigned long tmp; ++ int result; ++ ++ __asm__ __volatile__("@ atomic_add_unchecked\n" + "1: ldrex %0, [%3]\n" + " add %0, %0, %4\n" + " strex %1, %0, [%3]\n" +@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v) + smp_mb(); + + __asm__ __volatile__("@ atomic_add_return\n" ++"1: ldrex %1, [%3]\n" ++" adds %0, %1, %4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++" mov %0, %1\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ ++" strex %1, %0, [%3]\n" ++" teq %1, #0\n" ++" bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ ++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) ++ : "r" (&v->counter), "Ir" (i) ++ : "cc"); ++ ++ smp_mb(); ++ ++ return result; ++} ++ ++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) ++{ ++ unsigned long tmp; ++ int result; ++ ++ smp_mb(); ++ ++ __asm__ __volatile__("@ atomic_add_return_unchecked\n" + "1: ldrex %0, [%3]\n" + " add %0, %0, %4\n" + " strex %1, %0, [%3]\n" +@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v) + int result; + + __asm__ __volatile__("@ atomic_sub\n" ++"1: ldrex %1, [%3]\n" ++" subs %0, %1, %4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ ++" strex %1, %0, [%3]\n" ++" teq %1, #0\n" ++" bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ ++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) ++ : "r" (&v->counter), "Ir" (i) ++ : "cc"); ++} ++ ++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) ++{ ++ unsigned long tmp; ++ int result; ++ ++ __asm__ __volatile__("@ atomic_sub_unchecked\n" + "1: ldrex %0, [%3]\n" + " sub %0, %0, %4\n" + " strex %1, %0, [%3]\n" +@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v) + smp_mb(); + + __asm__ __volatile__("@ atomic_sub_return\n" +-"1: ldrex %0, [%3]\n" +-" sub %0, %0, %4\n" ++"1: ldrex %1, [%3]\n" ++" subs %0, %1, %4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++" mov %0, %1\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ + " strex %1, %0, [%3]\n" + " teq %1, #0\n" + " bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); +@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) + return oldval; + } + ++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new) ++{ ++ unsigned long oldval, res; ++ ++ smp_mb(); ++ ++ do { ++ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n" ++ 
"ldrex %1, [%3]\n" ++ "mov %0, #0\n" ++ "teq %1, %4\n" ++ "strexeq %0, %5, [%3]\n" ++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) ++ : "r" (&ptr->counter), "Ir" (old), "r" (new) ++ : "cc"); ++ } while (res); ++ ++ smp_mb(); ++ ++ return oldval; ++} ++ + static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) + { + unsigned long tmp, tmp2; +@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v) + + return val; + } ++ ++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) ++{ ++ return atomic_add_return(i, v); ++} ++ + #define atomic_add(i, v) (void) atomic_add_return(i, v) ++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) ++{ ++ (void) atomic_add_return(i, v); ++} + + static inline int atomic_sub_return(int i, atomic_t *v) + { +@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v) + return val; + } + #define atomic_sub(i, v) (void) atomic_sub_return(i, v) ++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) ++{ ++ (void) atomic_sub_return(i, v); ++} + + static inline int atomic_cmpxchg(atomic_t *v, int old, int new) + { +@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) + return ret; + } + ++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) ++{ ++ return atomic_cmpxchg(v, old, new); ++} ++ + static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) + { + unsigned long flags; +@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) + #endif /* __LINUX_ARM_ARCH__ */ + + #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) ++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) ++{ ++ return xchg(&v->counter, new); ++} + + static inline int __atomic_add_unless(atomic_t *v, int a, int u) + { +@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) + } + + #define atomic_inc(v) atomic_add(1, v) ++static inline void atomic_inc_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_add_unchecked(1, v); ++} + #define atomic_dec(v) atomic_sub(1, v) ++static inline void atomic_dec_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_sub_unchecked(1, v); ++} + + #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) ++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_return_unchecked(1, v) == 0; ++} + #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) + #define atomic_inc_return(v) (atomic_add_return(1, v)) ++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_return_unchecked(1, v); ++} + #define atomic_dec_return(v) (atomic_sub_return(1, v)) + #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) + +@@ -241,6 +428,14 @@ typedef struct { + u64 __aligned(8) counter; + } atomic64_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ u64 __aligned(8) counter; ++} atomic64_unchecked_t; ++#else ++typedef atomic64_t atomic64_unchecked_t; ++#endif ++ + #define ATOMIC64_INIT(i) { (i) } + + static inline u64 atomic64_read(const atomic64_t *v) +@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v) + return result; + } + ++static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v) ++{ ++ u64 result; ++ ++ __asm__ __volatile__("@ atomic64_read_unchecked\n" ++" ldrexd %0, %H0, [%1]" ++ : "=&r" (result) ++ : "r" (&v->counter), "Qo" (v->counter) ++ ); ++ ++ return 
result; ++} ++ + static inline void atomic64_set(atomic64_t *v, u64 i) + { + u64 tmp; +@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i) + : "cc"); + } + ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i) ++{ ++ u64 tmp; ++ ++ __asm__ __volatile__("@ atomic64_set_unchecked\n" ++"1: ldrexd %0, %H0, [%2]\n" ++" strexd %0, %3, %H3, [%2]\n" ++" teq %0, #0\n" ++" bne 1b" ++ : "=&r" (tmp), "=Qo" (v->counter) ++ : "r" (&v->counter), "r" (i) ++ : "cc"); ++} ++ + static inline void atomic64_add(u64 i, atomic64_t *v) + { + u64 result; +@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v) + __asm__ __volatile__("@ atomic64_add\n" + "1: ldrexd %0, %H0, [%3]\n" + " adds %0, %0, %4\n" ++" adcs %H0, %H0, %H4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ ++" strexd %1, %0, %H0, [%3]\n" ++" teq %1, #0\n" ++" bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ ++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) ++ : "r" (&v->counter), "r" (i) ++ : "cc"); ++} ++ ++static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v) ++{ ++ u64 result; ++ unsigned long tmp; ++ ++ __asm__ __volatile__("@ atomic64_add_unchecked\n" ++"1: ldrexd %0, %H0, [%3]\n" ++" adds %0, %0, %4\n" + " adc %H0, %H0, %H4\n" + " strexd %1, %0, %H0, [%3]\n" + " teq %1, #0\n" +@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v) + + static inline u64 atomic64_add_return(u64 i, atomic64_t *v) + { ++ u64 result, tmp; ++ ++ smp_mb(); ++ ++ __asm__ __volatile__("@ atomic64_add_return\n" ++"1: ldrexd %1, %H1, [%3]\n" ++" adds %0, %1, %4\n" ++" adcs %H0, %H1, %H4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++" mov %0, %1\n" ++" mov %H0, %H1\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ ++" strexd %1, %0, %H0, [%3]\n" ++" teq %1, #0\n" ++" bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ ++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) ++ : "r" (&v->counter), "r" (i) ++ : "cc"); ++ ++ smp_mb(); ++ ++ return result; ++} ++ ++static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v) ++{ + u64 result; + unsigned long tmp; + + smp_mb(); + +- __asm__ __volatile__("@ atomic64_add_return\n" ++ __asm__ __volatile__("@ atomic64_add_return_unchecked\n" + "1: ldrexd %0, %H0, [%3]\n" + " adds %0, %0, %4\n" + " adc %H0, %H0, %H4\n" +@@ -318,23 +607,34 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) + __asm__ __volatile__("@ atomic64_sub\n" + "1: ldrexd %0, %H0, [%3]\n" + " subs %0, %0, %4\n" +-" sbc %H0, %H0, %H4\n" ++" sbcs %H0, %H0, %H4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ + " strexd %1, %0, %H0, [%3]\n" + " teq %1, #0\n" + " bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "r" (i) + : "cc"); + } + +-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) ++static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v) + { + u64 result; + unsigned long tmp; + +- smp_mb(); +- +- __asm__ __volatile__("@ atomic64_sub_return\n" ++ __asm__ __volatile__("@ atomic64_sub_unchecked\n" + "1: ldrexd %0, %H0, [%3]\n" + " subs %0, %0, %4\n" + " sbc %H0, %H0, %H4\n" +@@ -344,6 +644,39 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "r" (i) + : 
"cc"); ++} ++ ++static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) ++{ ++ u64 result, tmp; ++ ++ smp_mb(); ++ ++ __asm__ __volatile__("@ atomic64_sub_return\n" ++"1: ldrexd %1, %H1, [%3]\n" ++" subs %0, %1, %4\n" ++" sbcs %H0, %H1, %H4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++" mov %0, %1\n" ++" mov %H0, %H1\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ ++" strexd %1, %0, %H0, [%3]\n" ++" teq %1, #0\n" ++" bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ ++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) ++ : "r" (&v->counter), "r" (i) ++ : "cc"); + + smp_mb(); + +@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) + return oldval; + } + ++static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new) ++{ ++ u64 oldval; ++ unsigned long res; ++ ++ smp_mb(); ++ ++ do { ++ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n" ++ "ldrexd %1, %H1, [%3]\n" ++ "mov %0, #0\n" ++ "teq %1, %4\n" ++ "teqeq %H1, %H4\n" ++ "strexdeq %0, %5, %H5, [%3]" ++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) ++ : "r" (&ptr->counter), "r" (old), "r" (new) ++ : "cc"); ++ } while (res); ++ ++ smp_mb(); ++ ++ return oldval; ++} ++ + static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) + { + u64 result; +@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) + + static inline u64 atomic64_dec_if_positive(atomic64_t *v) + { +- u64 result; +- unsigned long tmp; ++ u64 result, tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_dec_if_positive\n" +-"1: ldrexd %0, %H0, [%3]\n" +-" subs %0, %0, #1\n" +-" sbc %H0, %H0, #0\n" ++"1: ldrexd %1, %H1, [%3]\n" ++" subs %0, %1, #1\n" ++" sbcs %H0, %H1, #0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++" mov %0, %1\n" ++" mov %H0, %H1\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ + " teq %H0, #0\n" +-" bmi 2f\n" ++" bmi 4f\n" + " strexd %1, %0, %H0, [%3]\n" + " teq %1, #0\n" + " bne 1b\n" +-"2:" ++"4:\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter) + : "cc"); +@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) + " teq %0, %5\n" + " teqeq %H0, %H5\n" + " moveq %1, #0\n" +-" beq 2f\n" ++" beq 4f\n" + " adds %0, %0, %6\n" +-" adc %H0, %H0, %H6\n" ++" adcs %H0, %H0, %H6\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ + " strexd %2, %0, %H0, [%4]\n" + " teq %2, #0\n" + " bne 1b\n" +-"2:" ++"4:\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ + : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "r" (u), "r" (a) + : "cc"); +@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) + + #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) + #define atomic64_inc(v) atomic64_add(1LL, (v)) ++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v)) + #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) ++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v)) + #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) + #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) + #define atomic64_dec(v) atomic64_sub(1LL, (v)) ++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v)) + #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) + #define atomic64_dec_and_test(v) 
(atomic64_dec_return((v)) == 0) + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) +diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h +index 75fe66b..ba3dee4 100644 +--- a/arch/arm/include/asm/cache.h ++++ b/arch/arm/include/asm/cache.h +@@ -4,8 +4,10 @@ + #ifndef __ASMARM_CACHE_H + #define __ASMARM_CACHE_H + ++#include <linux/const.h> ++ + #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + /* + * Memory returned by kmalloc() may be used for DMA, so we must make +@@ -24,5 +26,6 @@ + #endif + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) ++#define __read_only __attribute__ ((__section__(".data..read_only"))) + + #endif +diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h +index 738fcba..7a43500 100644 +--- a/arch/arm/include/asm/cacheflush.h ++++ b/arch/arm/include/asm/cacheflush.h +@@ -116,7 +116,7 @@ struct cpu_cache_fns { + void (*dma_unmap_area)(const void *, size_t, int); + + void (*dma_flush_range)(const void *, const void *); +-}; ++} __no_const; + + /* + * Select the calling method +diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h +index 6dcc164..b14d917 100644 +--- a/arch/arm/include/asm/checksum.h ++++ b/arch/arm/include/asm/checksum.h +@@ -37,7 +37,19 @@ __wsum + csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); + + __wsum +-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr); ++__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr); ++ ++static inline __wsum ++csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) ++{ ++ __wsum ret; ++ pax_open_userland(); ++ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr); ++ pax_close_userland(); ++ return ret; ++} ++ ++ + + /* + * Fold a partial checksum without adding pseudo headers +diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h +index 4f009c1..466c59b 100644 +--- a/arch/arm/include/asm/cmpxchg.h ++++ b/arch/arm/include/asm/cmpxchg.h +@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size + + #define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) ++#define xchg_unchecked(ptr,x) \ ++ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + + #include <asm-generic/cmpxchg-local.h> + +diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h +index 6ddbe44..b5e38b1 100644 +--- a/arch/arm/include/asm/domain.h ++++ b/arch/arm/include/asm/domain.h +@@ -48,18 +48,37 @@ + * Domain types + */ + #define DOMAIN_NOACCESS 0 +-#define DOMAIN_CLIENT 1 + #ifdef CONFIG_CPU_USE_DOMAINS ++#define DOMAIN_USERCLIENT 1 ++#define DOMAIN_KERNELCLIENT 1 + #define DOMAIN_MANAGER 3 ++#define DOMAIN_VECTORS DOMAIN_USER ++#else ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#define DOMAIN_MANAGER 1 ++#define DOMAIN_KERNEXEC 3 + #else + #define DOMAIN_MANAGER 1 + #endif + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++#define DOMAIN_USERCLIENT 0 ++#define DOMAIN_UDEREF 1 ++#define DOMAIN_VECTORS DOMAIN_KERNEL ++#else ++#define DOMAIN_USERCLIENT 1 ++#define DOMAIN_VECTORS DOMAIN_USER ++#endif ++#define DOMAIN_KERNELCLIENT 1 ++ ++#endif ++ + #define domain_val(dom,type) ((type) << (2*(dom))) + + #ifndef __ASSEMBLY__ + +-#ifdef 
CONFIG_CPU_USE_DOMAINS ++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + static inline void set_domain(unsigned val) + { + asm volatile( +@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val) + isb(); + } + +-#define modify_domain(dom,type) \ +- do { \ +- struct thread_info *thread = current_thread_info(); \ +- unsigned int domain = thread->cpu_domain; \ +- domain &= ~domain_val(dom, DOMAIN_MANAGER); \ +- thread->cpu_domain = domain | domain_val(dom, type); \ +- set_domain(thread->cpu_domain); \ +- } while (0) +- ++extern void modify_domain(unsigned int dom, unsigned int type); + #else + static inline void set_domain(unsigned val) { } + static inline void modify_domain(unsigned dom, unsigned type) { } +diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h +index 38050b1..9d90e8b 100644 +--- a/arch/arm/include/asm/elf.h ++++ b/arch/arm/include/asm/elf.h +@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) ++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) ++ ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x00008000UL ++ ++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) ++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) ++#endif + + /* When the program starts, a1 contains a pointer to a function to be + registered with atexit, as per the SVR4 ABI. A value of 0 means we +@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); + extern void elf_set_personality(const struct elf32_hdr *); + #define SET_PERSONALITY(ex) elf_set_personality(&(ex)) + +-struct mm_struct; +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + #endif +diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h +index de53547..52b9a28 100644 +--- a/arch/arm/include/asm/fncpy.h ++++ b/arch/arm/include/asm/fncpy.h +@@ -81,7 +81,9 @@ + BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \ + (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \ + \ ++ pax_open_kernel(); \ + memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \ ++ pax_close_kernel(); \ + flush_icache_range((unsigned long)(dest_buf), \ + (unsigned long)(dest_buf) + (size)); \ + \ +diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h +index e42cf59..7b94b8f 100644 +--- a/arch/arm/include/asm/futex.h ++++ b/arch/arm/include/asm/futex.h +@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + ++ pax_open_userland(); ++ + smp_mb(); + __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" + "1: ldrex %1, [%4]\n" +@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + : "cc", "memory"); + smp_mb(); + ++ pax_close_userland(); ++ + *uval = val; + return ret; + } +@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + ++ pax_open_userland(); ++ + __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" + "1: " TUSER(ldr) " %1, [%4]\n" + " teq %1, %2\n" +@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + : "r" 
(oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) + : "cc", "memory"); + ++ pax_close_userland(); ++ + *uval = val; + return ret; + } +@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) + return -EFAULT; + + pagefault_disable(); /* implies preempt_disable() */ ++ pax_open_userland(); + + switch (op) { + case FUTEX_OP_SET: +@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) + ret = -ENOSYS; + } + ++ pax_close_userland(); + pagefault_enable(); /* subsumes preempt_enable() */ + + if (!ret) { +diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h +index 83eb2f7..ed77159 100644 +--- a/arch/arm/include/asm/kmap_types.h ++++ b/arch/arm/include/asm/kmap_types.h +@@ -4,6 +4,6 @@ + /* + * This is the "bare minimum". AIO seems to require this. + */ +-#define KM_TYPE_NR 16 ++#define KM_TYPE_NR 17 + + #endif +diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h +index 9e614a1..3302cca 100644 +--- a/arch/arm/include/asm/mach/dma.h ++++ b/arch/arm/include/asm/mach/dma.h +@@ -22,7 +22,7 @@ struct dma_ops { + int (*residue)(unsigned int, dma_t *); /* optional */ + int (*setspeed)(unsigned int, dma_t *, int); /* optional */ + const char *type; +-}; ++} __do_const; + + struct dma_struct { + void *addr; /* single DMA address */ +diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h +index 2fe141f..192dc01 100644 +--- a/arch/arm/include/asm/mach/map.h ++++ b/arch/arm/include/asm/mach/map.h +@@ -27,13 +27,16 @@ struct map_desc { + #define MT_MINICLEAN 6 + #define MT_LOW_VECTORS 7 + #define MT_HIGH_VECTORS 8 +-#define MT_MEMORY 9 ++#define MT_MEMORY_RWX 9 + #define MT_ROM 10 +-#define MT_MEMORY_NONCACHED 11 ++#define MT_MEMORY_NONCACHED_RX 11 + #define MT_MEMORY_DTCM 12 + #define MT_MEMORY_ITCM 13 + #define MT_MEMORY_SO 14 + #define MT_MEMORY_DMA_READY 15 ++#define MT_MEMORY_RW 16 ++#define MT_MEMORY_RX 17 ++#define MT_MEMORY_NONCACHED_RW 18 + + #ifdef CONFIG_MMU + extern void iotable_init(struct map_desc *, int); +diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h +index 12f71a1..04e063c 100644 +--- a/arch/arm/include/asm/outercache.h ++++ b/arch/arm/include/asm/outercache.h +@@ -35,7 +35,7 @@ struct outer_cache_fns { + #endif + void (*set_debug)(unsigned long); + void (*resume)(void); +-}; ++} __no_const; + + #ifdef CONFIG_OUTER_CACHE + +diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h +index 812a494..71fc0b6 100644 +--- a/arch/arm/include/asm/page.h ++++ b/arch/arm/include/asm/page.h +@@ -114,7 +114,7 @@ struct cpu_user_fns { + void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); + void (*cpu_copy_user_highpage)(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); +-}; ++} __no_const; + + #ifdef MULTI_USER + extern struct cpu_user_fns cpu_user; +diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h +index 943504f..c37a730 100644 +--- a/arch/arm/include/asm/pgalloc.h ++++ b/arch/arm/include/asm/pgalloc.h +@@ -17,6 +17,7 @@ + #include <asm/processor.h> + #include <asm/cacheflush.h> + #include <asm/tlbflush.h> ++#include <asm/system_info.h> + + #define check_pgt_cache() do { } while (0) + +@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) + set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); + } + ++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) ++{ ++ 
pud_populate(mm, pud, pmd); ++} ++ + #else /* !CONFIG_ARM_LPAE */ + + /* +@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) + #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); }) + #define pmd_free(mm, pmd) do { } while (0) + #define pud_populate(mm,pmd,pte) BUG() ++#define pud_populate_kernel(mm,pmd,pte) BUG() + + #endif /* CONFIG_ARM_LPAE */ + +@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) + __free_page(pte); + } + ++static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot) ++{ ++#ifdef CONFIG_ARM_LPAE ++ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot); ++#else ++ if (addr & SECTION_SIZE) ++ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot); ++ else ++ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot); ++#endif ++ flush_pmd_entry(pmdp); ++} ++ + static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, + pmdval_t prot) + { +@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) + static inline void + pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) + { +- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE); ++ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask); + } + #define pmd_pgtable(pmd) pmd_page(pmd) + +diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h +index 5cfba15..f415e1a 100644 +--- a/arch/arm/include/asm/pgtable-2level-hwdef.h ++++ b/arch/arm/include/asm/pgtable-2level-hwdef.h +@@ -20,12 +20,15 @@ + #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) + #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0) + #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0) ++#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ + #define PMD_BIT4 (_AT(pmdval_t, 1) << 4) + #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) + #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ ++ + /* + * - section + */ ++#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */ + #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) + #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) + #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */ +@@ -37,6 +40,7 @@ + #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */ + #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */ + #define PMD_SECT_AF (_AT(pmdval_t, 0)) ++#define PMD_SECT_RDONLY (_AT(pmdval_t, 0)) + + #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0)) + #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE) +@@ -66,6 +70,7 @@ + * - extended small page/tiny page + */ + #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */ ++#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */ + #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4) + #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4) + #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4) +diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h +index f97ee02..07f1be5 100644 +--- a/arch/arm/include/asm/pgtable-2level.h ++++ b/arch/arm/include/asm/pgtable-2level.h +@@ -125,6 +125,7 @@ + #define L_PTE_XN (_AT(pteval_t, 1) << 9) + #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ + #define L_PTE_NONE (_AT(pteval_t, 1) << 11) ++#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7*/ + + /* + * These are the memory types, defined to be compatible with +diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h +index 18f5cef..25b8f43 100644 +--- a/arch/arm/include/asm/pgtable-3level-hwdef.h ++++ 
b/arch/arm/include/asm/pgtable-3level-hwdef.h +@@ -41,6 +41,7 @@ + */ + #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) + #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) ++#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) + #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) + #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) + #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) +@@ -71,6 +72,7 @@ + #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ + #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ + #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */ ++#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */ + #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */ + + /* +diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h +index 86b8fe3..e25f975 100644 +--- a/arch/arm/include/asm/pgtable-3level.h ++++ b/arch/arm/include/asm/pgtable-3level.h +@@ -74,6 +74,7 @@ + #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */ + #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ + #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */ ++#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */ + #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ + #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */ + #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */ +@@ -82,6 +83,7 @@ + /* + * To be used in assembly code with the upper page attributes. + */ ++#define L_PTE_PXN_HIGH (1 << (53 - 32)) + #define L_PTE_XN_HIGH (1 << (54 - 32)) + #define L_PTE_DIRTY_HIGH (1 << (55 - 32)) + +diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h +index 9bcd262..fba731c 100644 +--- a/arch/arm/include/asm/pgtable.h ++++ b/arch/arm/include/asm/pgtable.h +@@ -30,6 +30,9 @@ + #include <asm/pgtable-2level.h> + #endif + ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++ + /* + * Just any arbitrary offset to the start of the vmalloc VM area: the + * current 8MB value just means that there will be a 8MB "hole" after the +@@ -45,6 +48,9 @@ + #define LIBRARY_TEXT_START 0x0c000000 + + #ifndef __ASSEMBLY__ ++extern pteval_t __supported_pte_mask; ++extern pmdval_t __supported_pmd_mask; ++ + extern void __pte_error(const char *file, int line, pte_t); + extern void __pmd_error(const char *file, int line, pmd_t); + extern void __pgd_error(const char *file, int line, pgd_t); +@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t); + #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd) + #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd) + ++#define __HAVE_ARCH_PAX_OPEN_KERNEL ++#define __HAVE_ARCH_PAX_CLOSE_KERNEL ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#include <asm/domain.h> ++#include <linux/thread_info.h> ++#include <linux/preempt.h> ++#endif ++ ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++static inline int test_domain(int domain, int domaintype) ++{ ++ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype); ++} ++#endif ++ ++#ifdef CONFIG_PAX_KERNEXEC ++static inline unsigned long pax_open_kernel(void) { ++#ifdef CONFIG_ARM_LPAE ++ /* TODO */ ++#else ++ preempt_disable(); ++ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC)); ++ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC); ++#endif ++ return 0; ++} ++ ++static inline unsigned long pax_close_kernel(void) { ++#ifdef CONFIG_ARM_LPAE ++ /* TODO */ ++#else ++ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER)); ++ /* DOMAIN_MANAGER = "client" 
under KERNEXEC */ ++ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER); ++ preempt_enable_no_resched(); ++#endif ++ return 0; ++} ++#else ++static inline unsigned long pax_open_kernel(void) { return 0; } ++static inline unsigned long pax_close_kernel(void) { return 0; } ++#endif ++ + /* + * This is the lowest virtual address we can permit any user space + * mapping to be mapped at. This is particularly important for +@@ -72,8 +122,8 @@ extern void __pgd_error(const char *file, int line, pgd_t); + /* + * The pgprot_* and protection_map entries will be fixed up in runtime + * to include the cachable and bufferable bits based on memory policy, +- * as well as any architecture dependent bits like global/ASID and SMP +- * shared mapping bits. ++ * as well as any architecture dependent bits like global/ASID, PXN, ++ * and SMP shared mapping bits. + */ + #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG + +@@ -257,7 +307,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) + { + const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | +- L_PTE_NONE | L_PTE_VALID; ++ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask; + pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); + return pte; + } +diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h +index f3628fb..a0672dd 100644 +--- a/arch/arm/include/asm/proc-fns.h ++++ b/arch/arm/include/asm/proc-fns.h +@@ -75,7 +75,7 @@ extern struct processor { + unsigned int suspend_size; + void (*do_suspend)(void *); + void (*do_resume)(void *); +-} processor; ++} __do_const processor; + + #ifndef MULTI_CPU + extern void cpu_proc_init(void); +diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h +index 06e7d50..8a8e251 100644 +--- a/arch/arm/include/asm/processor.h ++++ b/arch/arm/include/asm/processor.h +@@ -65,9 +65,8 @@ struct thread_struct { + regs->ARM_cpsr |= PSR_ENDSTATE; \ + regs->ARM_pc = pc & ~1; /* pc */ \ + regs->ARM_sp = sp; /* sp */ \ +- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \ +- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \ +- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \ ++ /* r2 (envp), r1 (argv), r0 (argc) */ \ ++ (void)copy_from_user(®s->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \ + nommu_start_thread(regs); \ + }) + +diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h +index ce0dbe7..c085b6f 100644 +--- a/arch/arm/include/asm/psci.h ++++ b/arch/arm/include/asm/psci.h +@@ -29,7 +29,7 @@ struct psci_operations { + int (*cpu_off)(struct psci_power_state state); + int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); + int (*migrate)(unsigned long cpuid); +-}; ++} __no_const; + + extern struct psci_operations psci_ops; + +diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h +index d3a22be..3a69ad5 100644 +--- a/arch/arm/include/asm/smp.h ++++ b/arch/arm/include/asm/smp.h +@@ -107,7 +107,7 @@ struct smp_operations { + int (*cpu_disable)(unsigned int cpu); + #endif + #endif +-}; ++} __no_const; + + /* + * set platform specific SMP operations +diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h +index cddda1f..ff357f7 100644 +--- a/arch/arm/include/asm/thread_info.h ++++ b/arch/arm/include/asm/thread_info.h +@@ -77,9 +77,9 @@ struct thread_info { + .flags = 0, \ + .preempt_count = INIT_PREEMPT_COUNT, \ + .addr_limit = KERNEL_DS, \ +- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ +- 
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ +- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \ ++ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \ ++ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \ ++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +@@ -152,6 +152,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, + #define TIF_SYSCALL_AUDIT 9 + #define TIF_SYSCALL_TRACEPOINT 10 + #define TIF_SECCOMP 11 /* seccomp syscall filtering active */ ++ ++/* within 8 bits of TIF_SYSCALL_TRACE ++ * to meet flexible second operand requirements ++ */ ++#define TIF_GRSEC_SETXID 12 ++ + #define TIF_USING_IWMMXT 17 + #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ + #define TIF_RESTORE_SIGMASK 20 +@@ -165,10 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, + #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) + #define _TIF_SECCOMP (1 << TIF_SECCOMP) + #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) ++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID) + + /* Checks for any syscall work in entry-common.S */ + #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ +- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) ++ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID) + + /* + * Change these and you break ASM code in entry-common.S +diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h +index 7e1f760..510061e 100644 +--- a/arch/arm/include/asm/uaccess.h ++++ b/arch/arm/include/asm/uaccess.h +@@ -18,6 +18,7 @@ + #include <asm/domain.h> + #include <asm/unified.h> + #include <asm/compiler.h> ++#include <asm/pgtable.h> + + #define VERIFY_READ 0 + #define VERIFY_WRITE 1 +@@ -63,11 +64,35 @@ extern int __put_user_bad(void); + static inline void set_fs(mm_segment_t fs) + { + current_thread_info()->addr_limit = fs; +- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); ++ modify_domain(DOMAIN_KERNEL, fs ? 
DOMAIN_KERNELCLIENT : DOMAIN_MANAGER); + } + + #define segment_eq(a,b) ((a) == (b)) + ++static inline void pax_open_userland(void) ++{ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (segment_eq(get_fs(), USER_DS)) { ++ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF)); ++ modify_domain(DOMAIN_USER, DOMAIN_UDEREF); ++ } ++#endif ++ ++} ++ ++static inline void pax_close_userland(void) ++{ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (segment_eq(get_fs(), USER_DS)) { ++ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS)); ++ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS); ++ } ++#endif ++ ++} ++ + #define __addr_ok(addr) ({ \ + unsigned long flag; \ + __asm__("cmp %2, %0; movlo %0, #0" \ +@@ -143,8 +168,12 @@ extern int __get_user_4(void *); + + #define get_user(x,p) \ + ({ \ ++ int __e; \ + might_fault(); \ +- __get_user_check(x,p); \ ++ pax_open_userland(); \ ++ __e = __get_user_check(x,p); \ ++ pax_close_userland(); \ ++ __e; \ + }) + + extern int __put_user_1(void *, unsigned int); +@@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long); + + #define put_user(x,p) \ + ({ \ ++ int __e; \ + might_fault(); \ +- __put_user_check(x,p); \ ++ pax_open_userland(); \ ++ __e = __put_user_check(x,p); \ ++ pax_close_userland(); \ ++ __e; \ + }) + + #else /* CONFIG_MMU */ +@@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs) + #define __get_user(x,ptr) \ + ({ \ + long __gu_err = 0; \ ++ pax_open_userland(); \ + __get_user_err((x),(ptr),__gu_err); \ ++ pax_close_userland(); \ + __gu_err; \ + }) + + #define __get_user_error(x,ptr,err) \ + ({ \ ++ pax_open_userland(); \ + __get_user_err((x),(ptr),err); \ ++ pax_close_userland(); \ + (void) 0; \ + }) + +@@ -312,13 +349,17 @@ do { \ + #define __put_user(x,ptr) \ + ({ \ + long __pu_err = 0; \ ++ pax_open_userland(); \ + __put_user_err((x),(ptr),__pu_err); \ ++ pax_close_userland(); \ + __pu_err; \ + }) + + #define __put_user_error(x,ptr,err) \ + ({ \ ++ pax_open_userland(); \ + __put_user_err((x),(ptr),err); \ ++ pax_close_userland(); \ + (void) 0; \ + }) + +@@ -418,11 +459,44 @@ do { \ + + + #ifdef CONFIG_MMU +-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); +-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); ++extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n); ++extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n); ++ ++static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ unsigned long ret; ++ ++ check_object_size(to, n, false); ++ pax_open_userland(); ++ ret = ___copy_from_user(to, from, n); ++ pax_close_userland(); ++ return ret; ++} ++ ++static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ unsigned long ret; ++ ++ check_object_size(from, n, true); ++ pax_open_userland(); ++ ret = ___copy_to_user(to, from, n); ++ pax_close_userland(); ++ return ret; ++} ++ + extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); +-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); ++extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n); + extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); ++ ++static inline unsigned long __must_check __clear_user(void __user *addr, unsigned
long n) ++{ ++ unsigned long ret; ++ pax_open_userland(); ++ ret = ___clear_user(addr, n); ++ pax_close_userland(); ++ return ret; ++} ++ + #else + #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0) + #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0) +@@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l + + static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + if (access_ok(VERIFY_READ, from, n)) + n = __copy_from_user(to, from, n); + else /* security hole - plug it */ +@@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u + + static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + if (access_ok(VERIFY_WRITE, to, n)) + n = __copy_to_user(to, from, n); + return n; +diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h +index 96ee092..37f1844 100644 +--- a/arch/arm/include/uapi/asm/ptrace.h ++++ b/arch/arm/include/uapi/asm/ptrace.h +@@ -73,7 +73,7 @@ + * ARMv7 groups of PSR bits + */ + #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */ +-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */ ++#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */ + #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ + #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */ + +diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c +index 60d3b73..e5a0f22 100644 +--- a/arch/arm/kernel/armksyms.c ++++ b/arch/arm/kernel/armksyms.c +@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops); + + /* networking */ + EXPORT_SYMBOL(csum_partial); +-EXPORT_SYMBOL(csum_partial_copy_from_user); ++EXPORT_SYMBOL(__csum_partial_copy_from_user); + EXPORT_SYMBOL(csum_partial_copy_nocheck); + EXPORT_SYMBOL(__csum_ipv6_magic); + +@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero); + #ifdef CONFIG_MMU + EXPORT_SYMBOL(copy_page); + +-EXPORT_SYMBOL(__copy_from_user); +-EXPORT_SYMBOL(__copy_to_user); +-EXPORT_SYMBOL(__clear_user); ++EXPORT_SYMBOL(___copy_from_user); ++EXPORT_SYMBOL(___copy_to_user); ++EXPORT_SYMBOL(___clear_user); + + EXPORT_SYMBOL(__get_user_1); + EXPORT_SYMBOL(__get_user_2); +diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S +index 0f82098..fb3d3d5 100644 +--- a/arch/arm/kernel/entry-armv.S ++++ b/arch/arm/kernel/entry-armv.S +@@ -47,6 +47,87 @@ + 9997: + .endm + ++ .macro pax_enter_kernel ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ @ make aligned space for saved DACR ++ sub sp, sp, #8 ++ @ save regs ++ stmdb sp!, {r1, r2} ++ @ read DACR from cpu_domain into r1 ++ mov r2, sp ++ @ assume 8K pages, since we have to split the immediate in two ++ bic r2, r2, #(0x1fc0) ++ bic r2, r2, #(0x3f) ++ ldr r1, [r2, #TI_CPU_DOMAIN] ++ @ store old DACR on stack ++ str r1, [sp, #8] ++#ifdef CONFIG_PAX_KERNEXEC ++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT ++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) ++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) ++#endif ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ @ set current DOMAIN_USER to DOMAIN_NOACCESS ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) ++#endif ++ @ write r1 to current_thread_info()->cpu_domain ++ str r1, [r2, #TI_CPU_DOMAIN] ++ @ write r1 to DACR ++ mcr p15, 0, r1, c3, c0, 0 ++ @ instruction sync ++ 
instr_sync ++ @ restore regs ++ ldmia sp!, {r1, r2} ++#endif ++ .endm ++ ++ .macro pax_open_userland ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ @ save regs ++ stmdb sp!, {r0, r1} ++ @ read DACR from cpu_domain into r1 ++ mov r0, sp ++ @ assume 8K pages, since we have to split the immediate in two ++ bic r0, r0, #(0x1fc0) ++ bic r0, r0, #(0x3f) ++ ldr r1, [r0, #TI_CPU_DOMAIN] ++ @ set current DOMAIN_USER to DOMAIN_CLIENT ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) ++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF)) ++ @ write r1 to current_thread_info()->cpu_domain ++ str r1, [r0, #TI_CPU_DOMAIN] ++ @ write r1 to DACR ++ mcr p15, 0, r1, c3, c0, 0 ++ @ instruction sync ++ instr_sync ++ @ restore regs ++ ldmia sp!, {r0, r1} ++#endif ++ .endm ++ ++ .macro pax_close_userland ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ @ save regs ++ stmdb sp!, {r0, r1} ++ @ read DACR from cpu_domain into r1 ++ mov r0, sp ++ @ assume 8K pages, since we have to split the immediate in two ++ bic r0, r0, #(0x1fc0) ++ bic r0, r0, #(0x3f) ++ ldr r1, [r0, #TI_CPU_DOMAIN] ++ @ set current DOMAIN_USER to DOMAIN_NOACCESS ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) ++ @ write r1 to current_thread_info()->cpu_domain ++ str r1, [r0, #TI_CPU_DOMAIN] ++ @ write r1 to DACR ++ mcr p15, 0, r1, c3, c0, 0 ++ @ instruction sync ++ instr_sync ++ @ restore regs ++ ldmia sp!, {r0, r1} ++#endif ++ .endm ++ + .macro pabt_helper + @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5 + #ifdef MULTI_PABORT +@@ -89,11 +170,15 @@ + * Invalid mode handlers + */ + .macro inv_entry, reason ++ ++ pax_enter_kernel ++ + sub sp, sp, #S_FRAME_SIZE + ARM( stmib sp, {r1 - lr} ) + THUMB( stmia sp, {r0 - r12} ) + THUMB( str sp, [sp, #S_SP] ) + THUMB( str lr, [sp, #S_LR] ) ++ + mov r1, #\reason + .endm + +@@ -149,7 +234,11 @@ ENDPROC(__und_invalid) + .macro svc_entry, stack_hole=0 + UNWIND(.fnstart ) + UNWIND(.save {r0 - pc} ) ++ ++ pax_enter_kernel ++ + sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) ++ + #ifdef CONFIG_THUMB2_KERNEL + SPFIX( str r0, [sp] ) @ temporarily saved + SPFIX( mov r0, sp ) +@@ -164,7 +253,12 @@ ENDPROC(__und_invalid) + ldmia r0, {r3 - r5} + add r7, sp, #S_SP - 4 @ here for interlock avoidance + mov r6, #-1 @ "" "" "" "" ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ @ offset sp by 8 as done in pax_enter_kernel ++ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4) ++#else + add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) ++#endif + SPFIX( addeq r2, r2, #4 ) + str r3, [sp, #-4]! @ save the "real" r0 copied + @ from the exception stack +@@ -359,6 +453,9 @@ ENDPROC(__pabt_svc) + .macro usr_entry + UNWIND(.fnstart ) + UNWIND(.cantunwind ) @ don't unwind the user space ++ ++ pax_enter_kernel_user ++ + sub sp, sp, #S_FRAME_SIZE + ARM( stmib sp, {r1 - r12} ) + THUMB( stmia sp, {r0 - r12} ) +@@ -456,7 +553,9 @@ __und_usr: + tst r3, #PSR_T_BIT @ Thumb mode? 
+ bne __und_usr_thumb + sub r4, r2, #4 @ ARM instr at LR - 4 ++ pax_open_userland + 1: ldrt r0, [r4] ++ pax_close_userland + #ifdef CONFIG_CPU_ENDIAN_BE8 + rev r0, r0 @ little endian instruction + #endif +@@ -491,10 +590,14 @@ __und_usr_thumb: + */ + .arch armv6t2 + #endif ++ pax_open_userland + 2: ldrht r5, [r4] ++ pax_close_userland + cmp r5, #0xe800 @ 32bit instruction if xx != 0 + blo __und_usr_fault_16 @ 16bit undefined instruction ++ pax_open_userland + 3: ldrht r0, [r2] ++ pax_close_userland + add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 + str r2, [sp, #S_PC] @ it's a 2x16bit instr, update + orr r0, r0, r5, lsl #16 +@@ -733,7 +836,7 @@ ENTRY(__switch_to) + THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack + THUMB( str sp, [ip], #4 ) + THUMB( str lr, [ip], #4 ) +-#ifdef CONFIG_CPU_USE_DOMAINS ++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + ldr r6, [r2, #TI_CPU_DOMAIN] + #endif + set_tls r3, r4, r5 +@@ -742,7 +845,7 @@ ENTRY(__switch_to) + ldr r8, =__stack_chk_guard + ldr r7, [r7, #TSK_STACK_CANARY] + #endif +-#ifdef CONFIG_CPU_USE_DOMAINS ++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + mcr p15, 0, r6, c3, c0, 0 @ Set domain register + #endif + mov r5, r0 +diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S +index fefd7f9..e6f250e 100644 +--- a/arch/arm/kernel/entry-common.S ++++ b/arch/arm/kernel/entry-common.S +@@ -10,18 +10,46 @@ + + #include <asm/unistd.h> + #include <asm/ftrace.h> ++#include <asm/domain.h> + #include <asm/unwind.h> + ++#include "entry-header.S" ++ + #ifdef CONFIG_NEED_RET_TO_USER + #include <mach/entry-macro.S> + #else + .macro arch_ret_to_user, tmp1, tmp2 ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ @ save regs ++ stmdb sp!, {r1, r2} ++ @ read DACR from cpu_domain into r1 ++ mov r2, sp ++ @ assume 8K pages, since we have to split the immediate in two ++ bic r2, r2, #(0x1fc0) ++ bic r2, r2, #(0x3f) ++ ldr r1, [r2, #TI_CPU_DOMAIN] ++#ifdef CONFIG_PAX_KERNEXEC ++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT ++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) ++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) ++#endif ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ @ set current DOMAIN_USER to DOMAIN_UDEREF ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) ++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF)) ++#endif ++ @ write r1 to current_thread_info()->cpu_domain ++ str r1, [r2, #TI_CPU_DOMAIN] ++ @ write r1 to DACR ++ mcr p15, 0, r1, c3, c0, 0 ++ @ instruction sync ++ instr_sync ++ @ restore regs ++ ldmia sp!, {r1, r2} ++#endif + .endm + #endif + +-#include "entry-header.S" +- +- + .align 5 + /* + * This is the fast syscall return path. 
We do as little as +@@ -351,6 +379,7 @@ ENDPROC(ftrace_stub) + + .align 5 + ENTRY(vector_swi) ++ + sub sp, sp, #S_FRAME_SIZE + stmia sp, {r0 - r12} @ Calling r0 - r12 + ARM( add r8, sp, #S_PC ) +@@ -400,6 +429,12 @@ ENTRY(vector_swi) + ldr scno, [lr, #-4] @ get SWI instruction + #endif + ++ /* ++ * do this here to avoid a performance hit of wrapping the code above ++ * that directly dereferences userland to parse the SWI instruction ++ */ ++ pax_enter_kernel_user ++ + #ifdef CONFIG_ALIGNMENT_TRAP + ldr ip, __cr_alignment + ldr ip, [ip] +diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S +index 9a8531e..812e287 100644 +--- a/arch/arm/kernel/entry-header.S ++++ b/arch/arm/kernel/entry-header.S +@@ -73,9 +73,66 @@ + msr cpsr_c, \rtemp @ switch back to the SVC mode + .endm + ++ .macro pax_enter_kernel_user ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ @ save regs ++ stmdb sp!, {r0, r1} ++ @ read DACR from cpu_domain into r1 ++ mov r0, sp ++ @ assume 8K pages, since we have to split the immediate in two ++ bic r0, r0, #(0x1fc0) ++ bic r0, r0, #(0x3f) ++ ldr r1, [r0, #TI_CPU_DOMAIN] ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ @ set current DOMAIN_USER to DOMAIN_NOACCESS ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) ++#endif ++#ifdef CONFIG_PAX_KERNEXEC ++ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT ++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) ++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) ++#endif ++ @ write r1 to current_thread_info()->cpu_domain ++ str r1, [r0, #TI_CPU_DOMAIN] ++ @ write r1 to DACR ++ mcr p15, 0, r1, c3, c0, 0 ++ @ instruction sync ++ instr_sync ++ @ restore regs ++ ldmia sp!, {r0, r1} ++#endif ++ .endm ++ ++ .macro pax_exit_kernel ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ @ save regs ++ stmdb sp!, {r0, r1} ++ @ read old DACR from stack into r1 ++ ldr r1, [sp, #(8 + S_SP)] ++ sub r1, r1, #8 ++ ldr r1, [r1] ++ ++ @ write r1 to current_thread_info()->cpu_domain ++ mov r0, sp ++ @ assume 8K pages, since we have to split the immediate in two ++ bic r0, r0, #(0x1fc0) ++ bic r0, r0, #(0x3f) ++ str r1, [r0, #TI_CPU_DOMAIN] ++ @ write r1 to DACR ++ mcr p15, 0, r1, c3, c0, 0 ++ @ instruction sync ++ instr_sync ++ @ restore regs ++ ldmia sp!, {r0, r1} ++#endif ++ .endm ++ + #ifndef CONFIG_THUMB2_KERNEL + .macro svc_exit, rpsr + msr spsr_cxsf, \rpsr ++ ++ pax_exit_kernel ++ + #if defined(CONFIG_CPU_V6) + ldr r0, [sp] + strex r1, r2, [sp] @ clear the exclusive monitor +@@ -121,6 +178,9 @@ + .endm + #else /* CONFIG_THUMB2_KERNEL */ + .macro svc_exit, rpsr ++ ++ pax_exit_kernel ++ + ldr lr, [sp, #S_SP] @ top of the stack + ldrd r0, r1, [sp, #S_LR] @ calling lr and pc + clrex @ clear the exclusive monitor +diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c +index 2adda11..7fbe958 100644 +--- a/arch/arm/kernel/fiq.c ++++ b/arch/arm/kernel/fiq.c +@@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length) + #if defined(CONFIG_CPU_USE_DOMAINS) + memcpy((void *)0xffff001c, start, length); + #else ++ pax_open_kernel(); + memcpy(vectors_page + 0x1c, start, length); ++ pax_close_kernel(); + #endif + flush_icache_range(0xffff001c, 0xffff001c + length); + if (!vectors_high()) +diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S +index 8bac553..caee108 100644 +--- a/arch/arm/kernel/head.S ++++ b/arch/arm/kernel/head.S +@@ -52,7 +52,9 @@ + .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE + + .macro pgtbl, rd, phys +- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE ++ 
mov \rd, #TEXT_OFFSET ++ sub \rd, #PG_DIR_SIZE ++ add \rd, \rd, \phys + .endm + + /* +@@ -434,7 +436,7 @@ __enable_mmu: + mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ +- domain_val(DOMAIN_IO, DOMAIN_CLIENT)) ++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT)) + mcr p15, 0, r5, c3, c0, 0 @ load domain access register + mcr p15, 0, r4, c2, c0, 0 @ load page table pointer + #endif +diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c +index 1fd749e..47adb08 100644 +--- a/arch/arm/kernel/hw_breakpoint.c ++++ b/arch/arm/kernel/hw_breakpoint.c +@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self, + return NOTIFY_OK; + } + +-static struct notifier_block __cpuinitdata dbg_reset_nb = { ++static struct notifier_block dbg_reset_nb = { + .notifier_call = dbg_reset_notify, + }; + +diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c +index 1e9be5d..03edbc2 100644 +--- a/arch/arm/kernel/module.c ++++ b/arch/arm/kernel/module.c +@@ -37,12 +37,37 @@ + #endif + + #ifdef CONFIG_MMU +-void *module_alloc(unsigned long size) ++static inline void *__module_alloc(unsigned long size, pgprot_t prot) + { ++ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR) ++ return NULL; + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, +- GFP_KERNEL, PAGE_KERNEL_EXEC, -1, ++ GFP_KERNEL, prot, -1, + __builtin_return_address(0)); + } ++ ++void *module_alloc(unsigned long size) ++{ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ return __module_alloc(size, PAGE_KERNEL); ++#else ++ return __module_alloc(size, PAGE_KERNEL_EXEC); ++#endif ++ ++} ++ ++#ifdef CONFIG_PAX_KERNEXEC ++void module_free_exec(struct module *mod, void *module_region) ++{ ++ module_free(mod, module_region); ++} ++ ++void *module_alloc_exec(unsigned long size) ++{ ++ return __module_alloc(size, PAGE_KERNEL_EXEC); ++} ++#endif + #endif + + int +diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c +index 07314af..c46655c 100644 +--- a/arch/arm/kernel/patch.c ++++ b/arch/arm/kernel/patch.c +@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn) + bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL); + int size; + ++ pax_open_kernel(); + if (thumb2 && __opcode_is_thumb16(insn)) { + *(u16 *)addr = __opcode_to_mem_thumb16(insn); + size = sizeof(u16); +@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn) + *(u32 *)addr = insn; + size = sizeof(u32); + } ++ pax_close_kernel(); + + flush_icache_range((uintptr_t)(addr), + (uintptr_t)(addr) + size); +diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c +index 1f2740e..b36e225 100644 +--- a/arch/arm/kernel/perf_event_cpu.c ++++ b/arch/arm/kernel/perf_event_cpu.c +@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b, + return NOTIFY_OK; + } + +-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = { ++static struct notifier_block cpu_pmu_hotplug_notifier = { + .notifier_call = cpu_pmu_notify, + }; + +diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c +index 047d3e4..7e96107 100644 +--- a/arch/arm/kernel/process.c ++++ b/arch/arm/kernel/process.c +@@ -28,7 +28,6 @@ + #include <linux/tick.h> + #include <linux/utsname.h> + #include <linux/uaccess.h> +-#include <linux/random.h> + #include <linux/hw_breakpoint.h> + #include <linux/cpuidle.h> + #include <linux/leds.h> +@@ -251,9 +250,10 @@ void 
machine_power_off(void) + machine_shutdown(); + if (pm_power_off) + pm_power_off(); ++ BUG(); + } + +-void machine_restart(char *cmd) ++__noreturn void machine_restart(char *cmd) + { + machine_shutdown(); + +@@ -278,8 +278,8 @@ void __show_regs(struct pt_regs *regs) + init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version); +- print_symbol("PC is at %s\n", instruction_pointer(regs)); +- print_symbol("LR is at %s\n", regs->ARM_lr); ++ printk("PC is at %pA\n", instruction_pointer(regs)); ++ printk("LR is at %pA\n", regs->ARM_lr); + printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" + "sp : %08lx ip : %08lx fp : %08lx\n", + regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr, +@@ -447,12 +447,6 @@ unsigned long get_wchan(struct task_struct *p) + return 0; + } + +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long range_end = mm->brk + 0x02000000; +- return randomize_range(mm->brk, range_end, 0) ? : mm->brk; +-} +- + #ifdef CONFIG_MMU + /* + * The vectors page is always readable from user space for the +@@ -465,9 +459,8 @@ static int __init gate_vma_init(void) + { + gate_vma.vm_start = 0xffff0000; + gate_vma.vm_end = 0xffff0000 + PAGE_SIZE; +- gate_vma.vm_page_prot = PAGE_READONLY_EXEC; +- gate_vma.vm_flags = VM_READ | VM_EXEC | +- VM_MAYREAD | VM_MAYEXEC; ++ gate_vma.vm_flags = VM_NONE; ++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); + return 0; + } + arch_initcall(gate_vma_init); +diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c +index 3653164..d83e55d 100644 +--- a/arch/arm/kernel/psci.c ++++ b/arch/arm/kernel/psci.c +@@ -24,7 +24,7 @@ + #include <asm/opcodes-virt.h> + #include <asm/psci.h> + +-struct psci_operations psci_ops; ++struct psci_operations psci_ops __read_only; + + static int (*invoke_psci_fn)(u32, u32, u32, u32); + +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c +index 03deeff..741ce88 100644 +--- a/arch/arm/kernel/ptrace.c ++++ b/arch/arm/kernel/ptrace.c +@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs, + return current_thread_info()->syscall; + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) + { + current_thread_info()->syscall = scno; + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + /* Do the secure computing check first; failures should be fast. 
*/ + if (secure_computing(scno) == -1) + return -1; +diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c +index 234e339..81264a1 100644 +--- a/arch/arm/kernel/setup.c ++++ b/arch/arm/kernel/setup.c +@@ -96,21 +96,23 @@ EXPORT_SYMBOL(system_serial_high); + unsigned int elf_hwcap __read_mostly; + EXPORT_SYMBOL(elf_hwcap); + ++pteval_t __supported_pte_mask __read_only; ++pmdval_t __supported_pmd_mask __read_only; + + #ifdef MULTI_CPU +-struct processor processor __read_mostly; ++struct processor processor; + #endif + #ifdef MULTI_TLB +-struct cpu_tlb_fns cpu_tlb __read_mostly; ++struct cpu_tlb_fns cpu_tlb __read_only; + #endif + #ifdef MULTI_USER +-struct cpu_user_fns cpu_user __read_mostly; ++struct cpu_user_fns cpu_user __read_only; + #endif + #ifdef MULTI_CACHE +-struct cpu_cache_fns cpu_cache __read_mostly; ++struct cpu_cache_fns cpu_cache __read_only; + #endif + #ifdef CONFIG_OUTER_CACHE +-struct outer_cache_fns outer_cache __read_mostly; ++struct outer_cache_fns outer_cache __read_only; + EXPORT_SYMBOL(outer_cache); + #endif + +@@ -235,9 +237,13 @@ static int __get_cpu_architecture(void) + asm("mrc p15, 0, %0, c0, c1, 4" + : "=r" (mmfr0)); + if ((mmfr0 & 0x0000000f) >= 0x00000003 || +- (mmfr0 & 0x000000f0) >= 0x00000030) ++ (mmfr0 & 0x000000f0) >= 0x00000030) { + cpu_arch = CPU_ARCH_ARMv7; +- else if ((mmfr0 & 0x0000000f) == 0x00000002 || ++ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) { ++ __supported_pte_mask |= L_PTE_PXN; ++ __supported_pmd_mask |= PMD_PXNTABLE; ++ } ++ } else if ((mmfr0 & 0x0000000f) == 0x00000002 || + (mmfr0 & 0x000000f0) == 0x00000020) + cpu_arch = CPU_ARCH_ARMv6; + else +@@ -478,7 +484,7 @@ static void __init setup_processor(void) + __cpu_architecture = __get_cpu_architecture(); + + #ifdef MULTI_CPU +- processor = *list->proc; ++ memcpy((void *)&processor, list->proc, sizeof processor); + #endif + #ifdef MULTI_TLB + cpu_tlb = *list->tlb; +diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c +index 296786b..a8d4dd5 100644 +--- a/arch/arm/kernel/signal.c ++++ b/arch/arm/kernel/signal.c +@@ -396,22 +396,14 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, + __put_user(sigreturn_codes[idx+1], rc+1)) + return 1; + +- if (cpsr & MODE32_BIT) { +- /* +- * 32-bit code can use the new high-page +- * signal return code support. +- */ +- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; +- } else { +- /* +- * Ensure that the instruction cache sees +- * the return code written onto the stack. +- */ +- flush_icache_range((unsigned long)rc, +- (unsigned long)(rc + 2)); +- +- retcode = ((unsigned long)rc) + thumb; +- } ++ /* ++ * Ensure that the instruction cache sees ++ * the return code written onto the stack. 
++ */ ++ flush_icache_range((unsigned long)rc, ++ (unsigned long)(rc + 2)); ++ ++ retcode = ((unsigned long)rc) + thumb; + } + + regs->ARM_r0 = map_sig(ksig->sig); +diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c +index 1f2cccc..f40c02e 100644 +--- a/arch/arm/kernel/smp.c ++++ b/arch/arm/kernel/smp.c +@@ -70,7 +70,7 @@ enum ipi_msg_type { + + static DECLARE_COMPLETION(cpu_running); + +-static struct smp_operations smp_ops; ++static struct smp_operations smp_ops __read_only; + + void __init smp_set_ops(struct smp_operations *ops) + { +diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c +index 1c08911..264f009 100644 +--- a/arch/arm/kernel/traps.c ++++ b/arch/arm/kernel/traps.c +@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long); + void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) + { + #ifdef CONFIG_KALLSYMS +- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); ++ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from); + #else + printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); + #endif +@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; + static int die_owner = -1; + static unsigned int die_nest_count; + ++extern void gr_handle_kernel_exploit(void); ++ + static unsigned long oops_begin(void) + { + int cpu; +@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); ++ ++ gr_handle_kernel_exploit(); ++ + if (signr) + do_exit(signr); + } +@@ -601,7 +606,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) + * The user helper at 0xffff0fe0 must be used instead. + * (see entry-armv.S for details) + */ ++ pax_open_kernel(); + *((unsigned int *)0xffff0ff0) = regs->ARM_r0; ++ pax_close_kernel(); + } + return 0; + +@@ -841,13 +848,10 @@ void __init early_trap_init(void *vectors_base) + */ + kuser_get_tls_init(vectors); + +- /* +- * Copy signal return handlers into the vector page, and +- * set sigreturn to be a pointer to these. +- */ +- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), +- sigreturn_codes, sizeof(sigreturn_codes)); +- + flush_icache_range(vectors, vectors + PAGE_SIZE); +- modify_domain(DOMAIN_USER, DOMAIN_CLIENT); ++ ++#ifndef CONFIG_PAX_MEMORY_UDEREF ++ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT); ++#endif ++ + } +diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S +index b571484..4b2fc9b 100644 +--- a/arch/arm/kernel/vmlinux.lds.S ++++ b/arch/arm/kernel/vmlinux.lds.S +@@ -8,7 +8,11 @@ + #include <asm/thread_info.h> + #include <asm/memory.h> + #include <asm/page.h> +- ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#include <asm/pgtable.h> ++#endif ++ + #define PROC_INFO \ + . = ALIGN(4); \ + VMLINUX_SYMBOL(__proc_info_begin) = .; \ +@@ -94,6 +98,11 @@ SECTIONS + _text = .; + HEAD_TEXT + } ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ . = ALIGN(1<<SECTION_SHIFT); ++#endif ++ + .text : { /* Real text segment */ + _stext = .; /* Text and read-only data */ + __exception_text_start = .; +@@ -116,6 +125,8 @@ SECTIONS + ARM_CPU_KEEP(PROC_INFO) + } + ++ _etext = .; /* End of text section */ ++ + RO_DATA(PAGE_SIZE) + + . = ALIGN(4); +@@ -146,7 +157,9 @@ SECTIONS + + NOTES + +- _etext = .; /* End of text and rodata section */ ++#ifdef CONFIG_PAX_KERNEXEC ++ . 
= ALIGN(1<<SECTION_SHIFT); ++#endif + + #ifndef CONFIG_XIP_KERNEL + . = ALIGN(PAGE_SIZE); +@@ -207,6 +220,11 @@ SECTIONS + . = PAGE_OFFSET + TEXT_OFFSET; + #else + __init_end = .; ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ . = ALIGN(1<<SECTION_SHIFT); ++#endif ++ + . = ALIGN(THREAD_SIZE); + __data_loc = .; + #endif +diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S +index 14a0d98..7771a7d 100644 +--- a/arch/arm/lib/clear_user.S ++++ b/arch/arm/lib/clear_user.S +@@ -12,14 +12,14 @@ + + .text + +-/* Prototype: int __clear_user(void *addr, size_t sz) ++/* Prototype: int ___clear_user(void *addr, size_t sz) + * Purpose : clear some user memory + * Params : addr - user memory address to clear + * : sz - number of bytes to clear + * Returns : number of bytes NOT cleared + */ + ENTRY(__clear_user_std) +-WEAK(__clear_user) ++WEAK(___clear_user) + stmfd sp!, {r1, lr} + mov r2, #0 + cmp r1, #4 +@@ -44,7 +44,7 @@ WEAK(__clear_user) + USER( strnebt r2, [r0]) + mov r0, #0 + ldmfd sp!, {r1, pc} +-ENDPROC(__clear_user) ++ENDPROC(___clear_user) + ENDPROC(__clear_user_std) + + .pushsection .fixup,"ax" +diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S +index 66a477a..bee61d3 100644 +--- a/arch/arm/lib/copy_from_user.S ++++ b/arch/arm/lib/copy_from_user.S +@@ -16,7 +16,7 @@ + /* + * Prototype: + * +- * size_t __copy_from_user(void *to, const void *from, size_t n) ++ * size_t ___copy_from_user(void *to, const void *from, size_t n) + * + * Purpose: + * +@@ -84,11 +84,11 @@ + + .text + +-ENTRY(__copy_from_user) ++ENTRY(___copy_from_user) + + #include "copy_template.S" + +-ENDPROC(__copy_from_user) ++ENDPROC(___copy_from_user) + + .pushsection .fixup,"ax" + .align 0 +diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S +index 6ee2f67..d1cce76 100644 +--- a/arch/arm/lib/copy_page.S ++++ b/arch/arm/lib/copy_page.S +@@ -10,6 +10,7 @@ + * ASM optimised string functions + */ + #include <linux/linkage.h> ++#include <linux/const.h> + #include <asm/assembler.h> + #include <asm/asm-offsets.h> + #include <asm/cache.h> +diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S +index d066df6..df28194 100644 +--- a/arch/arm/lib/copy_to_user.S ++++ b/arch/arm/lib/copy_to_user.S +@@ -16,7 +16,7 @@ + /* + * Prototype: + * +- * size_t __copy_to_user(void *to, const void *from, size_t n) ++ * size_t ___copy_to_user(void *to, const void *from, size_t n) + * + * Purpose: + * +@@ -88,11 +88,11 @@ + .text + + ENTRY(__copy_to_user_std) +-WEAK(__copy_to_user) ++WEAK(___copy_to_user) + + #include "copy_template.S" + +-ENDPROC(__copy_to_user) ++ENDPROC(___copy_to_user) + ENDPROC(__copy_to_user_std) + + .pushsection .fixup,"ax" +diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S +index 7d08b43..f7ca7ea 100644 +--- a/arch/arm/lib/csumpartialcopyuser.S ++++ b/arch/arm/lib/csumpartialcopyuser.S +@@ -57,8 +57,8 @@ + * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT + */ + +-#define FN_ENTRY ENTRY(csum_partial_copy_from_user) +-#define FN_EXIT ENDPROC(csum_partial_copy_from_user) ++#define FN_ENTRY ENTRY(__csum_partial_copy_from_user) ++#define FN_EXIT ENDPROC(__csum_partial_copy_from_user) + + #include "csumpartialcopygeneric.S" + +diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c +index 64dbfa5..84a3fd9 100644 +--- a/arch/arm/lib/delay.c ++++ b/arch/arm/lib/delay.c +@@ -28,7 +28,7 @@ + /* + * Default to the loop-based delay implementation. 
+ */ +-struct arm_delay_ops arm_delay_ops = { ++struct arm_delay_ops arm_delay_ops __read_only = { + .delay = __loop_delay, + .const_udelay = __loop_const_udelay, + .udelay = __loop_udelay, +diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c +index 025f742..8432b08 100644 +--- a/arch/arm/lib/uaccess_with_memcpy.c ++++ b/arch/arm/lib/uaccess_with_memcpy.c +@@ -104,7 +104,7 @@ out: + } + + unsigned long +-__copy_to_user(void __user *to, const void *from, unsigned long n) ++___copy_to_user(void __user *to, const void *from, unsigned long n) + { + /* + * This test is stubbed out of the main function above to keep +diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c +index 49792a0..f192052 100644 +--- a/arch/arm/mach-kirkwood/common.c ++++ b/arch/arm/mach-kirkwood/common.c +@@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw) + clk_gate_ops.disable(hw); + } + +-static struct clk_ops clk_gate_fn_ops; ++static int clk_gate_fn_is_enabled(struct clk_hw *hw) ++{ ++ return clk_gate_ops.is_enabled(hw); ++} ++ ++static struct clk_ops clk_gate_fn_ops = { ++ .enable = clk_gate_fn_enable, ++ .disable = clk_gate_fn_disable, ++ .is_enabled = clk_gate_fn_is_enabled, ++}; + + static struct clk __init *clk_register_gate_fn(struct device *dev, + const char *name, +@@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev, + gate_fn->fn_en = fn_en; + gate_fn->fn_dis = fn_dis; + +- /* ops is the gate ops, but with our enable/disable functions */ +- if (clk_gate_fn_ops.enable != clk_gate_fn_enable || +- clk_gate_fn_ops.disable != clk_gate_fn_disable) { +- clk_gate_fn_ops = clk_gate_ops; +- clk_gate_fn_ops.enable = clk_gate_fn_enable; +- clk_gate_fn_ops.disable = clk_gate_fn_disable; +- } +- + clk = clk_register(dev, &gate_fn->gate.hw); + + if (IS_ERR(clk)) +diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c +index f6eeb87..cc90868 100644 +--- a/arch/arm/mach-omap2/board-n8x0.c ++++ b/arch/arm/mach-omap2/board-n8x0.c +@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev) + } + #endif + +-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = { ++static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = { + .late_init = n8x0_menelaus_late_init, + }; + +diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c +index 410e1ba..1d2dd59 100644 +--- a/arch/arm/mach-omap2/gpmc.c ++++ b/arch/arm/mach-omap2/gpmc.c +@@ -145,7 +145,6 @@ struct omap3_gpmc_regs { + }; + + static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ]; +-static struct irq_chip gpmc_irq_chip; + static unsigned gpmc_irq_start; + + static struct resource gpmc_mem_root; +@@ -707,6 +706,18 @@ static void gpmc_irq_noop(struct irq_data *data) { } + + static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; } + ++static struct irq_chip gpmc_irq_chip = { ++ .name = "gpmc", ++ .irq_startup = gpmc_irq_noop_ret, ++ .irq_enable = gpmc_irq_enable, ++ .irq_disable = gpmc_irq_disable, ++ .irq_shutdown = gpmc_irq_noop, ++ .irq_ack = gpmc_irq_noop, ++ .irq_mask = gpmc_irq_noop, ++ .irq_unmask = gpmc_irq_noop, ++ ++}; ++ + static int gpmc_setup_irq(void) + { + int i; +@@ -721,15 +732,6 @@ static int gpmc_setup_irq(void) + return gpmc_irq_start; + } + +- gpmc_irq_chip.name = "gpmc"; +- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret; +- gpmc_irq_chip.irq_enable = gpmc_irq_enable; +- gpmc_irq_chip.irq_disable = gpmc_irq_disable; +- 
gpmc_irq_chip.irq_shutdown = gpmc_irq_noop; +- gpmc_irq_chip.irq_ack = gpmc_irq_noop; +- gpmc_irq_chip.irq_mask = gpmc_irq_noop; +- gpmc_irq_chip.irq_unmask = gpmc_irq_noop; +- + gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE; + gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT; + +diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c +index f8bb3b9..831e7b8 100644 +--- a/arch/arm/mach-omap2/omap-wakeupgen.c ++++ b/arch/arm/mach-omap2/omap-wakeupgen.c +@@ -339,7 +339,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self, + return NOTIFY_OK; + } + +-static struct notifier_block __refdata irq_hotplug_notifier = { ++static struct notifier_block irq_hotplug_notifier = { + .notifier_call = irq_cpu_hotplug_notify, + }; + +diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c +index 381be7a..89b9c7e 100644 +--- a/arch/arm/mach-omap2/omap_device.c ++++ b/arch/arm/mach-omap2/omap_device.c +@@ -499,7 +499,7 @@ void omap_device_delete(struct omap_device *od) + struct platform_device __init *omap_device_build(const char *pdev_name, + int pdev_id, + struct omap_hwmod *oh, +- void *pdata, int pdata_len) ++ const void *pdata, int pdata_len) + { + struct omap_hwmod *ohs[] = { oh }; + +@@ -527,7 +527,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name, + struct platform_device __init *omap_device_build_ss(const char *pdev_name, + int pdev_id, + struct omap_hwmod **ohs, +- int oh_cnt, void *pdata, ++ int oh_cnt, const void *pdata, + int pdata_len) + { + int ret = -ENOMEM; +diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h +index 044c31d..2ee0861 100644 +--- a/arch/arm/mach-omap2/omap_device.h ++++ b/arch/arm/mach-omap2/omap_device.h +@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev); + /* Core code interface */ + + struct platform_device *omap_device_build(const char *pdev_name, int pdev_id, +- struct omap_hwmod *oh, void *pdata, ++ struct omap_hwmod *oh, const void *pdata, + int pdata_len); + + struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id, + struct omap_hwmod **oh, int oh_cnt, +- void *pdata, int pdata_len); ++ const void *pdata, int pdata_len); + + struct omap_device *omap_device_alloc(struct platform_device *pdev, + struct omap_hwmod **ohs, int oh_cnt); +diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c +index 3a750de..4c9b88f 100644 +--- a/arch/arm/mach-omap2/omap_hwmod.c ++++ b/arch/arm/mach-omap2/omap_hwmod.c +@@ -191,10 +191,10 @@ struct omap_hwmod_soc_ops { + int (*init_clkdm)(struct omap_hwmod *oh); + void (*update_context_lost)(struct omap_hwmod *oh); + int (*get_context_lost)(struct omap_hwmod *oh); +-}; ++} __no_const; + + /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */ +-static struct omap_hwmod_soc_ops soc_ops; ++static struct omap_hwmod_soc_ops soc_ops __read_only; + + /* omap_hwmod_list contains all registered struct omap_hwmods */ + static LIST_HEAD(omap_hwmod_list); +diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c +index d15c7bb..b2d1f0c 100644 +--- a/arch/arm/mach-omap2/wd_timer.c ++++ b/arch/arm/mach-omap2/wd_timer.c +@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void) + struct omap_hwmod *oh; + char *oh_name = "wd_timer2"; + char *dev_name = "omap_wdt"; +- struct omap_wd_timer_platform_data pdata; ++ static struct omap_wd_timer_platform_data pdata = { ++ .read_reset_sources = 
prm_read_reset_sources ++ }; + + if (!cpu_class_is_omap2() || of_have_populated_dt()) + return 0; +@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void) + return -EINVAL; + } + +- pdata.read_reset_sources = prm_read_reset_sources; +- + pdev = omap_device_build(dev_name, id, oh, &pdata, + sizeof(struct omap_wd_timer_platform_data)); + WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n", +diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h +index bddce2b..3eb04e2 100644 +--- a/arch/arm/mach-ux500/include/mach/setup.h ++++ b/arch/arm/mach-ux500/include/mach/setup.h +@@ -37,13 +37,6 @@ extern void ux500_timer_init(void); + .type = MT_DEVICE, \ + } + +-#define __MEM_DEV_DESC(x, sz) { \ +- .virtual = IO_ADDRESS(x), \ +- .pfn = __phys_to_pfn(x), \ +- .length = sz, \ +- .type = MT_MEMORY, \ +-} +- + extern struct smp_operations ux500_smp_ops; + extern void ux500_cpu_die(unsigned int cpu); + +diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig +index 4045c49..0263c07 100644 +--- a/arch/arm/mm/Kconfig ++++ b/arch/arm/mm/Kconfig +@@ -425,7 +425,7 @@ config CPU_32v5 + + config CPU_32v6 + bool +- select CPU_USE_DOMAINS if CPU_V6 && MMU ++ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF + select TLS_REG_EMUL if !CPU_32v6K && !MMU + + config CPU_32v6K +@@ -574,6 +574,7 @@ config CPU_CP15_MPU + + config CPU_USE_DOMAINS + bool ++ depends on !ARM_LPAE && !PAX_KERNEXEC + help + This option enables or disables the use of domain switching + via the set_fs() function. +diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c +index db26e2e..ee44569 100644 +--- a/arch/arm/mm/alignment.c ++++ b/arch/arm/mm/alignment.c +@@ -211,10 +211,12 @@ union offset_union { + #define __get16_unaligned_check(ins,val,addr) \ + do { \ + unsigned int err = 0, v, a = addr; \ ++ pax_open_userland(); \ + __get8_unaligned_check(ins,v,a,err); \ + val = v << ((BE) ? 8 : 0); \ + __get8_unaligned_check(ins,v,a,err); \ + val |= v << ((BE) ? 0 : 8); \ ++ pax_close_userland(); \ + if (err) \ + goto fault; \ + } while (0) +@@ -228,6 +230,7 @@ union offset_union { + #define __get32_unaligned_check(ins,val,addr) \ + do { \ + unsigned int err = 0, v, a = addr; \ ++ pax_open_userland(); \ + __get8_unaligned_check(ins,v,a,err); \ + val = v << ((BE) ? 24 : 0); \ + __get8_unaligned_check(ins,v,a,err); \ +@@ -236,6 +239,7 @@ union offset_union { + val |= v << ((BE) ? 8 : 16); \ + __get8_unaligned_check(ins,v,a,err); \ + val |= v << ((BE) ? 
0 : 24); \ ++ pax_close_userland(); \ + if (err) \ + goto fault; \ + } while (0) +@@ -249,6 +253,7 @@ union offset_union { + #define __put16_unaligned_check(ins,val,addr) \ + do { \ + unsigned int err = 0, v = val, a = addr; \ ++ pax_open_userland(); \ + __asm__( FIRST_BYTE_16 \ + ARM( "1: "ins" %1, [%2], #1\n" ) \ + THUMB( "1: "ins" %1, [%2]\n" ) \ +@@ -268,6 +273,7 @@ union offset_union { + " .popsection\n" \ + : "=r" (err), "=&r" (v), "=&r" (a) \ + : "0" (err), "1" (v), "2" (a)); \ ++ pax_close_userland(); \ + if (err) \ + goto fault; \ + } while (0) +@@ -281,6 +287,7 @@ union offset_union { + #define __put32_unaligned_check(ins,val,addr) \ + do { \ + unsigned int err = 0, v = val, a = addr; \ ++ pax_open_userland(); \ + __asm__( FIRST_BYTE_32 \ + ARM( "1: "ins" %1, [%2], #1\n" ) \ + THUMB( "1: "ins" %1, [%2]\n" ) \ +@@ -310,6 +317,7 @@ union offset_union { + " .popsection\n" \ + : "=r" (err), "=&r" (v), "=&r" (a) \ + : "0" (err), "1" (v), "2" (a)); \ ++ pax_close_userland(); \ + if (err) \ + goto fault; \ + } while (0) +diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c +index 5dbf13f..1a60561 100644 +--- a/arch/arm/mm/fault.c ++++ b/arch/arm/mm/fault.c +@@ -25,6 +25,7 @@ + #include <asm/system_misc.h> + #include <asm/system_info.h> + #include <asm/tlbflush.h> ++#include <asm/sections.h> + + #include "fault.h" + +@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, + if (fixup_exception(regs)) + return; + ++#ifdef CONFIG_PAX_KERNEXEC ++ if ((fsr & FSR_WRITE) && ++ (((unsigned long)_stext <= addr && addr < init_mm.end_code) || ++ (MODULES_VADDR <= addr && addr < MODULES_END))) ++ { ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", ¤t->signal->curr_ip, current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); ++ else ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); ++ } ++#endif ++ + /* + * No handler, we'll have to terminate things with extreme prejudice. + */ +@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, + } + #endif + ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (fsr & FSR_LNX_PF) { ++ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + tsk->thread.address = addr; + tsk->thread.error_code = fsr; + tsk->thread.trap_no = 14; +@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) + } + #endif /* CONFIG_MMU */ + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 20; i++) { ++ unsigned char c; ++ if (get_user(c, (__force unsigned char __user *)pc+i)) ++ printk(KERN_CONT "?? "); ++ else ++ printk(KERN_CONT "%02x ", c); ++ } ++ printk("\n"); ++ ++ printk(KERN_ERR "PAX: bytes at SP-4: "); ++ for (i = -1; i < 20; i++) { ++ unsigned long c; ++ if (get_user(c, (__force unsigned long __user *)sp+i)) ++ printk(KERN_CONT "???????? 
"); ++ else ++ printk(KERN_CONT "%08lx ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * First Level Translation Fault Handler + * +@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) + const struct fsr_info *inf = fsr_info + fsr_fs(fsr); + struct siginfo info; + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (addr < TASK_SIZE && is_domain_fault(fsr)) { ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", ¤t->signal->curr_ip, current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); ++ else ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); ++ goto die; ++ } ++#endif ++ + if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) + return; + ++die: + printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n", + inf->name, fsr, addr); + +@@ -575,9 +637,49 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) + const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr); + struct siginfo info; + ++ if (user_mode(regs)) { ++ if (addr == 0xffff0fe0UL) { ++ /* ++ * PaX: __kuser_get_tls emulation ++ */ ++ regs->ARM_r0 = current_thread_info()->tp_value; ++ regs->ARM_pc = regs->ARM_lr; ++ return; ++ } ++ } ++ ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) { ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", ¤t->signal->curr_ip, current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), ++ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr); ++ else ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), ++ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr); ++ goto die; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) { ++ unsigned int bkpt; ++ ++ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) { ++ current->thread.error_code = ifsr; ++ current->thread.trap_no = 0; ++ pax_report_refcount_overflow(regs); ++ fixup_exception(regs); ++ return; ++ } ++ } ++#endif ++ + if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) + return; + ++die: + printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", + inf->name, ifsr, addr); + +diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h +index cf08bdf..772656c 100644 +--- a/arch/arm/mm/fault.h ++++ b/arch/arm/mm/fault.h +@@ -3,6 +3,7 @@ + + /* + * Fault status register encodings. We steal bit 31 for our own purposes. ++ * Set when the FSR value is from an instruction fault. 
+ */ + #define FSR_LNX_PF (1 << 31) + #define FSR_WRITE (1 << 11) +@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr) + } + #endif + ++/* valid for LPAE and !LPAE */ ++static inline int is_xn_fault(unsigned int fsr) ++{ ++ return ((fsr_fs(fsr) & 0x3c) == 0xc); ++} ++ ++static inline int is_domain_fault(unsigned int fsr) ++{ ++ return ((fsr_fs(fsr) & 0xD) == 0x9); ++} ++ + void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); + unsigned long search_exception_table(unsigned long addr); + +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c +index ad722f1..763fdd3 100644 +--- a/arch/arm/mm/init.c ++++ b/arch/arm/mm/init.c +@@ -30,6 +30,8 @@ + #include <asm/setup.h> + #include <asm/tlb.h> + #include <asm/fixmap.h> ++#include <asm/system_info.h> ++#include <asm/cp15.h> + + #include <asm/mach/arch.h> + #include <asm/mach/map.h> +@@ -736,7 +738,46 @@ void free_initmem(void) + { + #ifdef CONFIG_HAVE_TCM + extern char __tcm_start, __tcm_end; ++#endif ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ unsigned long addr; ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ int cpu_arch = cpu_architecture(); ++ unsigned int cr = get_cr(); ++ ++ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { ++ /* make pages tables, etc before .text NX */ ++ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ __section_update(pmd, addr, PMD_SECT_XN); ++ } ++ /* make init NX */ ++ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ __section_update(pmd, addr, PMD_SECT_XN); ++ } ++ /* make kernel code/rodata RX */ ++ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++#ifdef CONFIG_ARM_LPAE ++ __section_update(pmd, addr, PMD_SECT_RDONLY); ++#else ++ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE); ++#endif ++ } ++ } ++#endif + ++#ifdef CONFIG_HAVE_TCM + poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); + totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), + __phys_to_pfn(__pa(&__tcm_end)), +diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c +index 04d9006..c547d85 100644 +--- a/arch/arm/mm/ioremap.c ++++ b/arch/arm/mm/ioremap.c +@@ -392,9 +392,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached) + unsigned int mtype; + + if (cached) +- mtype = MT_MEMORY; ++ mtype = MT_MEMORY_RX; + else +- mtype = MT_MEMORY_NONCACHED; ++ mtype = MT_MEMORY_NONCACHED_RX; + + return __arm_ioremap_caller(phys_addr, size, mtype, + __builtin_return_address(0)); +diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c +index 10062ce..8695745 100644 +--- a/arch/arm/mm/mmap.c ++++ b/arch/arm/mm/mmap.c +@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + struct vm_area_struct *vma; + int do_align = 0; + int aliasing = cache_is_vipt_aliasing(); ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + /* +@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + if (len > TASK_SIZE) + return -ENOMEM; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_align) + addr = COLOUR_ALIGN(addr, pgoff); +@@ -88,8 +93,7 @@ 
arch_get_unmapped_area(struct file *filp, unsigned long addr, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + +@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + info.high_limit = TASK_SIZE; + info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; + return vm_unmapped_area(&info); + } + +@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + unsigned long addr = addr0; + int do_align = 0; + int aliasing = cache_is_vipt_aliasing(); ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + /* +@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + return addr; + } + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + if (do_align) +@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + else + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + +@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + info.high_limit = mm->mmap_base; + info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + + /* +@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + { + unsigned long random_factor = 0UL; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* 8 bits of randomness in 20 address space bits */ + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) +@@ -180,10 +194,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(random_factor); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c +index a84ff76..f221c1d 100644 +--- a/arch/arm/mm/mmu.c ++++ b/arch/arm/mm/mmu.c +@@ -36,6 +36,22 @@ + #include "mm.h" + #include "tcm.h" + ++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++void modify_domain(unsigned int dom, unsigned int type) ++{ ++ struct thread_info *thread = current_thread_info(); ++ unsigned int domain = thread->cpu_domain; ++ /* ++ * DOMAIN_MANAGER might be defined to some other value, ++ * use the arch-defined constant ++ */ ++ domain &= ~domain_val(dom, 3); ++ thread->cpu_domain = domain | domain_val(dom, type); ++ set_domain(thread->cpu_domain); ++} ++EXPORT_SYMBOL(modify_domain); 
++#endif ++ + /* + * empty_zero_page is a special page that is used for + * zero-initialized data and COW. +@@ -211,10 +227,18 @@ void adjust_cr(unsigned long mask, unsigned long set) + } + #endif + +-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN ++#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY + #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE + +-static struct mem_type mem_types[] = { ++#ifdef CONFIG_PAX_KERNEXEC ++#define L_PTE_KERNEXEC L_PTE_RDONLY ++#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY ++#else ++#define L_PTE_KERNEXEC L_PTE_DIRTY ++#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE ++#endif ++ ++static struct mem_type mem_types[] __read_only = { + [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ + .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | + L_PTE_SHARED, +@@ -243,16 +267,16 @@ static struct mem_type mem_types[] = { + [MT_UNCACHED] = { + .prot_pte = PROT_PTE_DEVICE, + .prot_l1 = PMD_TYPE_TABLE, +- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, ++ .prot_sect = PROT_SECT_DEVICE, + .domain = DOMAIN_IO, + }, + [MT_CACHECLEAN] = { +- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY, + .domain = DOMAIN_KERNEL, + }, + #ifndef CONFIG_ARM_LPAE + [MT_MINICLEAN] = { +- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY, + .domain = DOMAIN_KERNEL, + }, + #endif +@@ -260,36 +284,54 @@ static struct mem_type mem_types[] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_RDONLY, + .prot_l1 = PMD_TYPE_TABLE, +- .domain = DOMAIN_USER, ++ .domain = DOMAIN_VECTORS, + }, + [MT_HIGH_VECTORS] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | +- L_PTE_USER | L_PTE_RDONLY, ++ L_PTE_RDONLY, + .prot_l1 = PMD_TYPE_TABLE, +- .domain = DOMAIN_USER, ++ .domain = DOMAIN_VECTORS, + }, +- [MT_MEMORY] = { ++ [MT_MEMORY_RWX] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, + .domain = DOMAIN_KERNEL, + }, ++ [MT_MEMORY_RW] = { ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, ++ .prot_l1 = PMD_TYPE_TABLE, ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, ++ .domain = DOMAIN_KERNEL, ++ }, ++ [MT_MEMORY_RX] = { ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC, ++ .prot_l1 = PMD_TYPE_TABLE, ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, ++ .domain = DOMAIN_KERNEL, ++ }, + [MT_ROM] = { +- .prot_sect = PMD_TYPE_SECT, ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY, + .domain = DOMAIN_KERNEL, + }, +- [MT_MEMORY_NONCACHED] = { ++ [MT_MEMORY_NONCACHED_RW] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_MT_BUFFERABLE, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, + .domain = DOMAIN_KERNEL, + }, ++ [MT_MEMORY_NONCACHED_RX] = { ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC | ++ L_PTE_MT_BUFFERABLE, ++ .prot_l1 = PMD_TYPE_TABLE, ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, ++ .domain = DOMAIN_KERNEL, ++ }, + [MT_MEMORY_DTCM] = { +- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | +- L_PTE_XN, ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, + .prot_l1 = PMD_TYPE_TABLE, +- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY, + .domain = DOMAIN_KERNEL, + }, + [MT_MEMORY_ITCM] = { +@@ -299,10 +341,10 @@ static struct mem_type mem_types[] = { + }, + [MT_MEMORY_SO] = { + .prot_pte = 
L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | +- L_PTE_MT_UNCACHED | L_PTE_XN, ++ L_PTE_MT_UNCACHED, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S | +- PMD_SECT_UNCACHED | PMD_SECT_XN, ++ PMD_SECT_UNCACHED, + .domain = DOMAIN_KERNEL, + }, + [MT_MEMORY_DMA_READY] = { +@@ -388,9 +430,35 @@ static void __init build_mem_type_table(void) + * to prevent speculative instruction fetches. + */ + mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN; ++ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN; + mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN; ++ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN; + mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN; ++ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN; + mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN; ++ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN; ++ ++ /* Mark other regions on ARMv6+ as execute-never */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN; ++ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN; ++ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN; ++ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN; ++#ifndef CONFIG_ARM_LPAE ++ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN; ++ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN; ++#endif ++ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN; ++ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN; ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN; ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN; ++ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN; ++ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN; ++#endif ++ ++ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN; ++ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN; + } + if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { + /* +@@ -451,6 +519,9 @@ static void __init build_mem_type_table(void) + * from SVC mode and no access from userspace. 
+ */ + mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; ++#ifdef CONFIG_PAX_KERNEXEC ++ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; ++#endif + mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + #endif +@@ -468,11 +539,17 @@ static void __init build_mem_type_table(void) + mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; + mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; + mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; +- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; +- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; ++ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S; ++ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; ++ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S; ++ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED; ++ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S; ++ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; +- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; +- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S; ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED; ++ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S; ++ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED; + } + } + +@@ -483,15 +560,20 @@ static void __init build_mem_type_table(void) + if (cpu_arch >= CPU_ARCH_ARMv6) { + if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { + /* Non-cacheable Normal is XCB = 001 */ +- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ++ PMD_SECT_BUFFERED; ++ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= + PMD_SECT_BUFFERED; + } else { + /* For both ARMv6 and non-TEX-remapping ARMv7 */ +- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ++ PMD_SECT_TEX(1); ++ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= + PMD_SECT_TEX(1); + } + } else { +- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE; ++ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE; + } + + #ifdef CONFIG_ARM_LPAE +@@ -507,6 +589,8 @@ static void __init build_mem_type_table(void) + vecs_pgprot |= PTE_EXT_AF; + #endif + ++ user_pgprot |= __supported_pte_mask; ++ + for (i = 0; i < 16; i++) { + pteval_t v = pgprot_val(protection_map[i]); + protection_map[i] = __pgprot(v | user_pgprot); +@@ -524,10 +608,15 @@ static void __init build_mem_type_table(void) + + mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; + mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; +- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; +- mem_types[MT_MEMORY].prot_pte |= kern_pgprot; ++ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; ++ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; ++ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; ++ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; ++ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd; ++ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot; + mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; +- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask; ++ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask; + mem_types[MT_ROM].prot_sect |= cp->pmd; + + switch (cp->pmd) { +@@ -1147,18 +1236,15 @@ void __init 
arm_mm_memblock_reserve(void) + * called function. This means you can't use any function or debugging + * method which may touch any device, otherwise the kernel _will_ crash. + */ ++ ++static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE); ++ + static void __init devicemaps_init(struct machine_desc *mdesc) + { + struct map_desc map; + unsigned long addr; +- void *vectors; +- +- /* +- * Allocate the vector page early. +- */ +- vectors = early_alloc(PAGE_SIZE); + +- early_trap_init(vectors); ++ early_trap_init(&vectors); + + for (addr = VMALLOC_START; addr; addr += PMD_SIZE) + pmd_clear(pmd_off_k(addr)); +@@ -1198,7 +1284,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) + * location (0xffff0000). If we aren't using high-vectors, also + * create a mapping at the low-vectors virtual address. + */ +- map.pfn = __phys_to_pfn(virt_to_phys(vectors)); ++ map.pfn = __phys_to_pfn(virt_to_phys(&vectors)); + map.virtual = 0xffff0000; + map.length = PAGE_SIZE; + map.type = MT_HIGH_VECTORS; +@@ -1256,8 +1342,39 @@ static void __init map_lowmem(void) + map.pfn = __phys_to_pfn(start); + map.virtual = __phys_to_virt(start); + map.length = end - start; +- map.type = MT_MEMORY; + ++#ifdef CONFIG_PAX_KERNEXEC ++ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) { ++ struct map_desc kernel; ++ struct map_desc initmap; ++ ++ /* when freeing initmem we will make this RW */ ++ initmap.pfn = __phys_to_pfn(__pa(__init_begin)); ++ initmap.virtual = (unsigned long)__init_begin; ++ initmap.length = _sdata - __init_begin; ++ initmap.type = MT_MEMORY_RWX; ++ create_mapping(&initmap); ++ ++ /* when freeing initmem we will make this RX */ ++ kernel.pfn = __phys_to_pfn(__pa(_stext)); ++ kernel.virtual = (unsigned long)_stext; ++ kernel.length = __init_begin - _stext; ++ kernel.type = MT_MEMORY_RWX; ++ create_mapping(&kernel); ++ ++ if (map.virtual < (unsigned long)_stext) { ++ map.length = (unsigned long)_stext - map.virtual; ++ map.type = MT_MEMORY_RWX; ++ create_mapping(&map); ++ } ++ ++ map.pfn = __phys_to_pfn(__pa(_sdata)); ++ map.virtual = (unsigned long)_sdata; ++ map.length = end - __pa(_sdata); ++ } ++#endif ++ ++ map.type = MT_MEMORY_RW; + create_mapping(&map); + } + } +diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S +index 78f520b..31f0cb6 100644 +--- a/arch/arm/mm/proc-v7-2level.S ++++ b/arch/arm/mm/proc-v7-2level.S +@@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext) + tst r1, #L_PTE_XN + orrne r3, r3, #PTE_EXT_XN + ++ tst r1, #L_PTE_PXN ++ orrne r3, r3, #PTE_EXT_PXN ++ + tst r1, #L_PTE_YOUNG + tstne r1, #L_PTE_VALID + #ifndef CONFIG_CPU_USE_DOMAINS +diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c +index a5bc92d..0bb4730 100644 +--- a/arch/arm/plat-omap/sram.c ++++ b/arch/arm/plat-omap/sram.c +@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size, + * Looks like we need to preserve some bootloader code at the + * beginning of SRAM for jumping to flash for reboot to work... 
+ */ ++ pax_open_kernel(); + memset_io(omap_sram_base + omap_sram_skip, 0, + omap_sram_size - omap_sram_skip); ++ pax_close_kernel(); + } +diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h +index 1141782..0959d64 100644 +--- a/arch/arm/plat-samsung/include/plat/dma-ops.h ++++ b/arch/arm/plat-samsung/include/plat/dma-ops.h +@@ -48,7 +48,7 @@ struct samsung_dma_ops { + int (*started)(unsigned ch); + int (*flush)(unsigned ch); + int (*stop)(unsigned ch); +-}; ++} __no_const; + + extern void *samsung_dmadev_get_ops(void); + extern void *s3c_dma_get_ops(void); +diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c +index f4726dc..39ed646 100644 +--- a/arch/arm64/kernel/debug-monitors.c ++++ b/arch/arm64/kernel/debug-monitors.c +@@ -149,7 +149,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self, + return NOTIFY_OK; + } + +-static struct notifier_block __cpuinitdata os_lock_nb = { ++static struct notifier_block os_lock_nb = { + .notifier_call = os_lock_notify, + }; + +diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c +index 5ab825c..96aaec8 100644 +--- a/arch/arm64/kernel/hw_breakpoint.c ++++ b/arch/arm64/kernel/hw_breakpoint.c +@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self, + return NOTIFY_OK; + } + +-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = { ++static struct notifier_block hw_breakpoint_reset_nb = { + .notifier_call = hw_breakpoint_reset_notify, + }; + +diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h +index c3a58a1..78fbf54 100644 +--- a/arch/avr32/include/asm/cache.h ++++ b/arch/avr32/include/asm/cache.h +@@ -1,8 +1,10 @@ + #ifndef __ASM_AVR32_CACHE_H + #define __ASM_AVR32_CACHE_H + ++#include <linux/const.h> ++ + #define L1_CACHE_SHIFT 5 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + /* + * Memory returned by kmalloc() may be used for DMA, so we must make +diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h +index d232888..87c8df1 100644 +--- a/arch/avr32/include/asm/elf.h ++++ b/arch/avr32/include/asm/elf.h +@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t; + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) ++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x00001000UL ++ ++#define PAX_DELTA_MMAP_LEN 15 ++#define PAX_DELTA_STACK_LEN 15 ++#endif + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. 
This could be done in user space, +diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h +index 479330b..53717a8 100644 +--- a/arch/avr32/include/asm/kmap_types.h ++++ b/arch/avr32/include/asm/kmap_types.h +@@ -2,9 +2,9 @@ + #define __ASM_AVR32_KMAP_TYPES_H + + #ifdef CONFIG_DEBUG_HIGHMEM +-# define KM_TYPE_NR 29 ++# define KM_TYPE_NR 30 + #else +-# define KM_TYPE_NR 14 ++# define KM_TYPE_NR 15 + #endif + + #endif /* __ASM_AVR32_KMAP_TYPES_H */ +diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c +index b2f2d2d..d1c85cb 100644 +--- a/arch/avr32/mm/fault.c ++++ b/arch/avr32/mm/fault.c +@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) + + int exception_trace = 1; + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 20; i++) { ++ unsigned char c; ++ if (get_user(c, (unsigned char *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%02x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * This routine handles page faults. It determines the address and the + * problem, and then passes it off to one of the appropriate routines. +@@ -174,6 +191,16 @@ bad_area: + up_read(&mm->mmap_sem); + + if (user_mode(regs)) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) { ++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) { ++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp); ++ do_group_exit(SIGKILL); ++ } ++ } ++#endif ++ + if (exception_trace && printk_ratelimit()) + printk("%s%s[%d]: segfault at %08lx pc %08lx " + "sp %08lx ecr %lu\n", +diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h +index 568885a..f8008df 100644 +--- a/arch/blackfin/include/asm/cache.h ++++ b/arch/blackfin/include/asm/cache.h +@@ -7,6 +7,7 @@ + #ifndef __ARCH_BLACKFIN_CACHE_H + #define __ARCH_BLACKFIN_CACHE_H + ++#include <linux/const.h> + #include <linux/linkage.h> /* for asmlinkage */ + + /* +@@ -14,7 +15,7 @@ + * Blackfin loads 32 bytes for cache + */ + #define L1_CACHE_SHIFT 5 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + #define SMP_CACHE_BYTES L1_CACHE_BYTES + + #define ARCH_DMA_MINALIGN L1_CACHE_BYTES +diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h +index aea2718..3639a60 100644 +--- a/arch/cris/include/arch-v10/arch/cache.h ++++ b/arch/cris/include/arch-v10/arch/cache.h +@@ -1,8 +1,9 @@ + #ifndef _ASM_ARCH_CACHE_H + #define _ASM_ARCH_CACHE_H + ++#include <linux/const.h> + /* Etrax 100LX have 32-byte cache-lines. */ +-#define L1_CACHE_BYTES 32 + #define L1_CACHE_SHIFT 5 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif /* _ASM_ARCH_CACHE_H */ +diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h +index 7caf25d..ee65ac5 100644 +--- a/arch/cris/include/arch-v32/arch/cache.h ++++ b/arch/cris/include/arch-v32/arch/cache.h +@@ -1,11 +1,12 @@ + #ifndef _ASM_CRIS_ARCH_CACHE_H + #define _ASM_CRIS_ARCH_CACHE_H + ++#include <linux/const.h> + #include <arch/hwregs/dma.h> + + /* A cache-line is 32 bytes. 
*/ +-#define L1_CACHE_BYTES 32 + #define L1_CACHE_SHIFT 5 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) + +diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h +index b86329d..6709906 100644 +--- a/arch/frv/include/asm/atomic.h ++++ b/arch/frv/include/asm/atomic.h +@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v) + #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) + #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter)) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) + { + int c, old; +diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h +index 2797163..c2a401d 100644 +--- a/arch/frv/include/asm/cache.h ++++ b/arch/frv/include/asm/cache.h +@@ -12,10 +12,11 @@ + #ifndef __ASM_CACHE_H + #define __ASM_CACHE_H + ++#include <linux/const.h> + + /* bytes per L1 cache line */ + #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT) +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) + #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) +diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h +index 43901f2..0d8b865 100644 +--- a/arch/frv/include/asm/kmap_types.h ++++ b/arch/frv/include/asm/kmap_types.h +@@ -2,6 +2,6 @@ + #ifndef _ASM_KMAP_TYPES_H + #define _ASM_KMAP_TYPES_H + +-#define KM_TYPE_NR 17 ++#define KM_TYPE_NR 18 + + #endif +diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c +index 836f147..4cf23f5 100644 +--- a/arch/frv/mm/elf-fdpic.c ++++ b/arch/frv/mm/elf-fdpic.c +@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + { + struct vm_area_struct *vma; + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); + + if (len > TASK_SIZE) + return -ENOMEM; +@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(current->mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + goto success; + } + +@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + info.high_limit = (current->mm->start_stack - 0x00200000); + info.align_mask = 0; + info.align_offset = 0; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + if (!(addr & ~PAGE_MASK)) + goto success; +diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h +index f4ca594..adc72fd6 100644 +--- a/arch/hexagon/include/asm/cache.h ++++ b/arch/hexagon/include/asm/cache.h +@@ -21,9 +21,11 @@ + #ifndef __ASM_CACHE_H 
+ #define __ASM_CACHE_H + ++#include <linux/const.h> ++ + /* Bytes per L1 cache line */ +-#define L1_CACHE_SHIFT (5) +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_SHIFT 5 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __cacheline_aligned __aligned(L1_CACHE_BYTES) + #define ____cacheline_aligned __aligned(L1_CACHE_BYTES) +diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h +index 6e6fe18..a6ae668 100644 +--- a/arch/ia64/include/asm/atomic.h ++++ b/arch/ia64/include/asm/atomic.h +@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v) + #define atomic64_inc(v) atomic64_add(1, (v)) + #define atomic64_dec(v) atomic64_sub(1, (v)) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + /* Atomic operations are already serializing */ + #define smp_mb__before_atomic_dec() barrier() + #define smp_mb__after_atomic_dec() barrier() +diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h +index 988254a..e1ee885 100644 +--- a/arch/ia64/include/asm/cache.h ++++ b/arch/ia64/include/asm/cache.h +@@ -1,6 +1,7 @@ + #ifndef _ASM_IA64_CACHE_H + #define _ASM_IA64_CACHE_H + ++#include <linux/const.h> + + /* + * Copyright (C) 1998-2000 Hewlett-Packard Co +@@ -9,7 +10,7 @@ + + /* Bytes per L1 (data) cache line. */ + #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #ifdef CONFIG_SMP + # define SMP_CACHE_SHIFT L1_CACHE_SHIFT +diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h +index 5a83c5c..4d7f553 100644 +--- a/arch/ia64/include/asm/elf.h ++++ b/arch/ia64/include/asm/elf.h +@@ -42,6 +42,13 @@ + */ + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) ++ ++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) ++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 
16 : 3*PAGE_SHIFT - 13) ++#endif ++ + #define PT_IA_64_UNWIND 0x70000001 + + /* IA-64 relocations: */ +diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h +index 96a8d92..617a1cf 100644 +--- a/arch/ia64/include/asm/pgalloc.h ++++ b/arch/ia64/include/asm/pgalloc.h +@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) + pgd_val(*pgd_entry) = __pa(pud); + } + ++static inline void ++pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) ++{ ++ pgd_populate(mm, pgd_entry, pud); ++} ++ + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) + { + return quicklist_alloc(0, GFP_KERNEL, NULL); +@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) + pud_val(*pud_entry) = __pa(pmd); + } + ++static inline void ++pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) ++{ ++ pud_populate(mm, pud_entry, pmd); ++} ++ + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) + { + return quicklist_alloc(0, GFP_KERNEL, NULL); +diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h +index 815810c..d60bd4c 100644 +--- a/arch/ia64/include/asm/pgtable.h ++++ b/arch/ia64/include/asm/pgtable.h +@@ -12,7 +12,7 @@ + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +- ++#include <linux/const.h> + #include <asm/mman.h> + #include <asm/page.h> + #include <asm/processor.h> +@@ -142,6 +142,17 @@ + #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) + #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) + #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) ++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) ++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) ++#else ++# define PAGE_SHARED_NOEXEC PAGE_SHARED ++# define PAGE_READONLY_NOEXEC PAGE_READONLY ++# define PAGE_COPY_NOEXEC PAGE_COPY ++#endif ++ + #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) + #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) + #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) +diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h +index 54ff557..70c88b7 100644 +--- a/arch/ia64/include/asm/spinlock.h ++++ b/arch/ia64/include/asm/spinlock.h +@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) + unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; + + asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); +- ACCESS_ONCE(*p) = (tmp + 2) & ~1; ++ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1; + } + + static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) +diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h +index 449c8c0..18965fb 100644 +--- a/arch/ia64/include/asm/uaccess.h ++++ b/arch/ia64/include/asm/uaccess.h +@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use + static inline unsigned long + __copy_to_user (void __user *to, const void *from, unsigned long count) + { ++ if (count > INT_MAX) ++ return count; ++ ++ if (!__builtin_constant_p(count)) ++ check_object_size(from, count, true); ++ + return __copy_user(to, (__force void __user *) from, count); + } + + static inline unsigned long + 
__copy_from_user (void *to, const void __user *from, unsigned long count) + { ++ if (count > INT_MAX) ++ return count; ++ ++ if (!__builtin_constant_p(count)) ++ check_object_size(to, count, false); ++ + return __copy_user((__force void __user *) to, from, count); + } + +@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) + ({ \ + void __user *__cu_to = (to); \ + const void *__cu_from = (from); \ +- long __cu_len = (n); \ ++ unsigned long __cu_len = (n); \ + \ +- if (__access_ok(__cu_to, __cu_len, get_fs())) \ ++ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \ ++ if (!__builtin_constant_p(n)) \ ++ check_object_size(__cu_from, __cu_len, true); \ + __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ ++ } \ + __cu_len; \ + }) + +@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) + ({ \ + void *__cu_to = (to); \ + const void __user *__cu_from = (from); \ +- long __cu_len = (n); \ ++ unsigned long __cu_len = (n); \ + \ + __chk_user_ptr(__cu_from); \ +- if (__access_ok(__cu_from, __cu_len, get_fs())) \ ++ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \ ++ if (!__builtin_constant_p(n)) \ ++ check_object_size(__cu_to, __cu_len, false); \ + __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ ++ } \ + __cu_len; \ + }) + +diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c +index 2d67317..07d8bfa 100644 +--- a/arch/ia64/kernel/err_inject.c ++++ b/arch/ia64/kernel/err_inject.c +@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb, + return NOTIFY_OK; + } + +-static struct notifier_block __cpuinitdata err_inject_cpu_notifier = ++static struct notifier_block err_inject_cpu_notifier = + { + .notifier_call = err_inject_cpu_callback, + }; +diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c +index d7396db..b33e873 100644 +--- a/arch/ia64/kernel/mca.c ++++ b/arch/ia64/kernel/mca.c +@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb, + return NOTIFY_OK; + } + +-static struct notifier_block mca_cpu_notifier __cpuinitdata = { ++static struct notifier_block mca_cpu_notifier = { + .notifier_call = mca_cpu_callback + }; + +diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c +index 24603be..948052d 100644 +--- a/arch/ia64/kernel/module.c ++++ b/arch/ia64/kernel/module.c +@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt) + void + module_free (struct module *mod, void *module_region) + { +- if (mod && mod->arch.init_unw_table && +- module_region == mod->module_init) { ++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) { + unw_remove_unwind_table(mod->arch.init_unw_table); + mod->arch.init_unw_table = NULL; + } +@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, + } + + static inline int ++in_init_rx (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx; ++} ++ ++static inline int ++in_init_rw (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw; ++} ++ ++static inline int + in_init (const struct module *mod, uint64_t addr) + { +- return addr - (uint64_t) mod->module_init < mod->init_size; ++ return in_init_rx(mod, addr) || in_init_rw(mod, addr); ++} ++ ++static inline int ++in_core_rx (const struct 
module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx; ++} ++ ++static inline int ++in_core_rw (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw; + } + + static inline int + in_core (const struct module *mod, uint64_t addr) + { +- return addr - (uint64_t) mod->module_core < mod->core_size; ++ return in_core_rx(mod, addr) || in_core_rw(mod, addr); + } + + static inline int +@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, + break; + + case RV_BDREL: +- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); ++ if (in_init_rx(mod, val)) ++ val -= (uint64_t) mod->module_init_rx; ++ else if (in_init_rw(mod, val)) ++ val -= (uint64_t) mod->module_init_rw; ++ else if (in_core_rx(mod, val)) ++ val -= (uint64_t) mod->module_core_rx; ++ else if (in_core_rw(mod, val)) ++ val -= (uint64_t) mod->module_core_rw; + break; + + case RV_LTV: +@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind + * addresses have been selected... + */ + uint64_t gp; +- if (mod->core_size > MAX_LTOFF) ++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF) + /* + * This takes advantage of fact that SHF_ARCH_SMALL gets allocated + * at the end of the module. + */ +- gp = mod->core_size - MAX_LTOFF / 2; ++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2; + else +- gp = mod->core_size / 2; +- gp = (uint64_t) mod->module_core + ((gp + 7) & -8); ++ gp = (mod->core_size_rx + mod->core_size_rw) / 2; ++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8); + mod->arch.gp = gp; + DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); + } +diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c +index 79521d5..43dddff 100644 +--- a/arch/ia64/kernel/palinfo.c ++++ b/arch/ia64/kernel/palinfo.c +@@ -1006,7 +1006,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, + return NOTIFY_OK; + } + +-static struct notifier_block __refdata palinfo_cpu_notifier = ++static struct notifier_block palinfo_cpu_notifier = + { + .notifier_call = palinfo_cpu_callback, + .priority = 0, +diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c +index aa527d7..f237752 100644 +--- a/arch/ia64/kernel/salinfo.c ++++ b/arch/ia64/kernel/salinfo.c +@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu + return NOTIFY_OK; + } + +-static struct notifier_block salinfo_cpu_notifier __cpuinitdata = ++static struct notifier_block salinfo_cpu_notifier = + { + .notifier_call = salinfo_cpu_callback, + .priority = 0, +diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c +index 41e33f8..65180b2 100644 +--- a/arch/ia64/kernel/sys_ia64.c ++++ b/arch/ia64/kernel/sys_ia64.c +@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len + unsigned long align_mask = 0; + struct mm_struct *mm = current->mm; + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + + if (len > RGN_MAP_LIMIT) + return -ENOMEM; +@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len + if (REGION_NUMBER(addr) == RGN_HPAGE) + addr = 0; + #endif ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ addr = mm->free_area_cache; ++ else ++#endif ++ + if (!addr) + addr = TASK_UNMAPPED_BASE; + +@@ -61,6 +69,7 
@@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len + info.high_limit = TASK_SIZE; + info.align_mask = align_mask; + info.align_offset = 0; ++ info.threadstack_offset = offset; + return vm_unmapped_area(&info); + } + +diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c +index dc00b2c..cce53c2 100644 +--- a/arch/ia64/kernel/topology.c ++++ b/arch/ia64/kernel/topology.c +@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb, + return NOTIFY_OK; + } + +-static struct notifier_block __cpuinitdata cache_cpu_notifier = ++static struct notifier_block cache_cpu_notifier = + { + .notifier_call = cache_cpu_callback + }; +diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S +index 0ccb28f..8992469 100644 +--- a/arch/ia64/kernel/vmlinux.lds.S ++++ b/arch/ia64/kernel/vmlinux.lds.S +@@ -198,7 +198,7 @@ SECTIONS { + /* Per-cpu data: */ + . = ALIGN(PERCPU_PAGE_SIZE); + PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu) +- __phys_per_cpu_start = __per_cpu_load; ++ __phys_per_cpu_start = per_cpu_load; + /* + * ensure percpu data fits + * into percpu page size +diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c +index 6cf0341..d352594 100644 +--- a/arch/ia64/mm/fault.c ++++ b/arch/ia64/mm/fault.c +@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address) + return pte_present(pte); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 8; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + # define VM_READ_BIT 0 + # define VM_WRITE_BIT 1 + # define VM_EXEC_BIT 2 +@@ -149,8 +166,21 @@ retry: + if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) + goto bad_area; + +- if ((vma->vm_flags & mask) != mask) ++ if ((vma->vm_flags & mask) != mask) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) { ++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip) ++ goto bad_area; ++ ++ up_read(&mm->mmap_sem); ++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + goto bad_area; ++ } + + /* + * If for any reason at all we couldn't handle the fault, make +diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c +index 76069c1..c2aa816 100644 +--- a/arch/ia64/mm/hugetlbpage.c ++++ b/arch/ia64/mm/hugetlbpage.c +@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u + unsigned long pgoff, unsigned long flags) + { + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags); + + if (len > RGN_MAP_LIMIT) + return -ENOMEM; +@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u + info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT; + info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1); + info.align_offset = 0; ++ info.threadstack_offset = offset; + return vm_unmapped_area(&info); + } + +diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c +index 20bc967..a26993e 100644 +--- a/arch/ia64/mm/init.c ++++ b/arch/ia64/mm/init.c +@@ -120,6 +120,19 @@ ia64_init_addr_space (void) + vma->vm_start = current->thread.rbs_bot & PAGE_MASK; + vma->vm_end = vma->vm_start 
+ PAGE_SIZE; + vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) { ++ vma->vm_flags &= ~VM_EXEC; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (current->mm->pax_flags & MF_PAX_MPROTECT) ++ vma->vm_flags &= ~VM_MAYEXEC; ++#endif ++ ++ } ++#endif ++ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + down_write(&current->mm->mmap_sem); + if (insert_vm_struct(current->mm, vma)) { +diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h +index 40b3ee9..8c2c112 100644 +--- a/arch/m32r/include/asm/cache.h ++++ b/arch/m32r/include/asm/cache.h +@@ -1,8 +1,10 @@ + #ifndef _ASM_M32R_CACHE_H + #define _ASM_M32R_CACHE_H + ++#include <linux/const.h> ++ + /* L1 cache line size */ + #define L1_CACHE_SHIFT 4 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif /* _ASM_M32R_CACHE_H */ +diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c +index 82abd15..d95ae5d 100644 +--- a/arch/m32r/lib/usercopy.c ++++ b/arch/m32r/lib/usercopy.c +@@ -14,6 +14,9 @@ + unsigned long + __generic_copy_to_user(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + prefetch(from); + if (access_ok(VERIFY_WRITE, to, n)) + __copy_user(to,from,n); +@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n) + unsigned long + __generic_copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + prefetchw(to); + if (access_ok(VERIFY_READ, from, n)) + __copy_user_zeroing(to,from,n); +diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h +index 0395c51..5f26031 100644 +--- a/arch/m68k/include/asm/cache.h ++++ b/arch/m68k/include/asm/cache.h +@@ -4,9 +4,11 @@ + #ifndef __ARCH_M68K_CACHE_H + #define __ARCH_M68K_CACHE_H + ++#include <linux/const.h> ++ + /* bytes per L1 cache line */ + #define L1_CACHE_SHIFT 4 +-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define ARCH_DMA_MINALIGN L1_CACHE_BYTES + +diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c +index 3c52fa6..11b2ad8 100644 +--- a/arch/metag/mm/hugetlbpage.c ++++ b/arch/metag/mm/hugetlbpage.c +@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len) + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & HUGEPT_MASK; + info.align_offset = 0; ++ info.threadstack_offset = 0; + return vm_unmapped_area(&info); + } + +diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h +index 4efe96a..60e8699 100644 +--- a/arch/microblaze/include/asm/cache.h ++++ b/arch/microblaze/include/asm/cache.h +@@ -13,11 +13,12 @@ + #ifndef _ASM_MICROBLAZE_CACHE_H + #define _ASM_MICROBLAZE_CACHE_H + ++#include <linux/const.h> + #include <asm/registers.h> + + #define L1_CACHE_SHIFT 5 + /* word-granular cache in microblaze */ +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define SMP_CACHE_BYTES L1_CACHE_BYTES + +diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h +index 08b6079..eb272cf 100644 +--- a/arch/mips/include/asm/atomic.h ++++ b/arch/mips/include/asm/atomic.h +@@ -21,6 +21,10 @@ + #include <asm/cmpxchg.h> + #include <asm/war.h> + ++#ifdef CONFIG_GENERIC_ATOMIC64 ++#include <asm-generic/atomic64.h> ++#endif ++ + #define ATOMIC_INIT(i) { (i) } + + /* +@@ -759,6 +763,16 @@ static
__inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) + */ + #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #endif /* CONFIG_64BIT */ + + /* +diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h +index b4db69f..8f3b093 100644 +--- a/arch/mips/include/asm/cache.h ++++ b/arch/mips/include/asm/cache.h +@@ -9,10 +9,11 @@ + #ifndef _ASM_CACHE_H + #define _ASM_CACHE_H + ++#include <linux/const.h> + #include <kmalloc.h> + + #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define SMP_CACHE_SHIFT L1_CACHE_SHIFT + #define SMP_CACHE_BYTES L1_CACHE_BYTES +diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h +index cf3ae24..238d22f 100644 +--- a/arch/mips/include/asm/elf.h ++++ b/arch/mips/include/asm/elf.h +@@ -372,13 +372,16 @@ extern const char *__elf_platform; + #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + #endif + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) ++ ++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 
27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#endif ++ + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 + struct linux_binprm; + extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); + +-struct mm_struct; +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + #endif /* _ASM_ELF_H */ +diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h +index c1f6afa..38cc6e9 100644 +--- a/arch/mips/include/asm/exec.h ++++ b/arch/mips/include/asm/exec.h +@@ -12,6 +12,6 @@ + #ifndef _ASM_EXEC_H + #define _ASM_EXEC_H + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + + #endif /* _ASM_EXEC_H */ +diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h +index eab99e5..607c98e 100644 +--- a/arch/mips/include/asm/page.h ++++ b/arch/mips/include/asm/page.h +@@ -96,7 +96,7 @@ extern void copy_user_highpage(struct page *to, struct page *from, + #ifdef CONFIG_CPU_MIPS32 + typedef struct { unsigned long pte_low, pte_high; } pte_t; + #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) +- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) ++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; }) + #else + typedef struct { unsigned long long pte; } pte_t; + #define pte_val(x) ((x).pte) +diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h +index 881d18b..cea38bc 100644 +--- a/arch/mips/include/asm/pgalloc.h ++++ b/arch/mips/include/asm/pgalloc.h +@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) + { + set_pud(pud, __pud((unsigned long)pmd)); + } ++ ++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) ++{ ++ pud_populate(mm, pud, pmd); ++} + #endif + + /* +diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h +index 178f792..8ebc510 100644 +--- a/arch/mips/include/asm/thread_info.h ++++ b/arch/mips/include/asm/thread_info.h +@@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28"); + #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */ + #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */ + #define TIF_LOAD_WATCH 25 /* If set, load watch registers */ ++/* li takes a 32bit immediate */ ++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */ + #define TIF_SYSCALL_TRACE 31 /* syscall trace active */ + + #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) +@@ -126,15 +128,18 @@ register struct thread_info *__current_thread_info __asm__("$28"); + #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR) + #define _TIF_FPUBOUND (1<<TIF_FPUBOUND) + #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID) ++ ++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID) + + /* work to do in syscall_trace_leave() */ +-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT) ++#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID) + + /* work to do on interrupt/exception return */ + #define _TIF_WORK_MASK \ + (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME) + /* work to do on any return to u-space */ +-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT) ++#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID) + + #endif /* __KERNEL__ 
*/ + +diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c +index e06f777..3244284 100644 +--- a/arch/mips/kernel/binfmt_elfn32.c ++++ b/arch/mips/kernel/binfmt_elfn32.c +@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + #undef ELF_ET_DYN_BASE + #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) ++ ++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#endif ++ + #include <asm/processor.h> + #include <linux/module.h> + #include <linux/elfcore.h> +diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c +index 556a435..b4fd2e3 100644 +--- a/arch/mips/kernel/binfmt_elfo32.c ++++ b/arch/mips/kernel/binfmt_elfo32.c +@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + #undef ELF_ET_DYN_BASE + #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) ++ ++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#endif ++ + #include <asm/processor.h> + + /* +diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c +index 3be4405..a799827 100644 +--- a/arch/mips/kernel/process.c ++++ b/arch/mips/kernel/process.c +@@ -461,15 +461,3 @@ unsigned long get_wchan(struct task_struct *task) + out: + return pc; + } +- +-/* +- * Don't forget that the stack pointer must be aligned on a 8 bytes +- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. +- */ +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() & ~PAGE_MASK; +- +- return sp & ALMASK; +-} +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c +index 9c6299c..2fb4c22 100644 +--- a/arch/mips/kernel/ptrace.c ++++ b/arch/mips/kernel/ptrace.c +@@ -528,6 +528,10 @@ static inline int audit_arch(void) + return arch; + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + /* + * Notification of system call entry/exit + * - triggered by current->work.syscall_trace +@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs) + /* do the secure computing check first */ + secure_computing_strict(regs->regs[2]); + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + if (!(current->ptrace & PT_PTRACED)) + goto out; + +diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S +index 9ea2964..c4329c3 100644 +--- a/arch/mips/kernel/scall32-o32.S ++++ b/arch/mips/kernel/scall32-o32.S +@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp) + + stack_done: + lw t0, TI_FLAGS($28) # syscall tracing enabled? 
+- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT ++ li t1, _TIF_SYSCALL_WORK + and t0, t1 + bnez t0, syscall_trace_entry # -> yes + +diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S +index 36cfd40..b1436e0 100644 +--- a/arch/mips/kernel/scall64-64.S ++++ b/arch/mips/kernel/scall64-64.S +@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp) + + sd a3, PT_R26(sp) # save a3 for syscall restarting + +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT ++ li t1, _TIF_SYSCALL_WORK + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? + and t0, t1, t0 + bnez t0, syscall_trace_entry +diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S +index 693d60b..ae0ba75 100644 +--- a/arch/mips/kernel/scall64-n32.S ++++ b/arch/mips/kernel/scall64-n32.S +@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) + + sd a3, PT_R26(sp) # save a3 for syscall restarting + +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT ++ li t1, _TIF_SYSCALL_WORK + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? + and t0, t1, t0 + bnez t0, n32_syscall_trace_entry +diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S +index af8887f..611ccb6 100644 +--- a/arch/mips/kernel/scall64-o32.S ++++ b/arch/mips/kernel/scall64-o32.S +@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp) + PTR 4b, bad_stack + .previous + +- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT ++ li t1, _TIF_SYSCALL_WORK + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? + and t0, t1, t0 + bnez t0, trace_a_syscall +diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c +index 0fead53..a2c0fb5 100644 +--- a/arch/mips/mm/fault.c ++++ b/arch/mips/mm/fault.c +@@ -27,6 +27,23 @@ + #include <asm/highmem.h> /* For VMALLOC_END */ + #include <linux/kdebug.h> + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate +diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c +index 7e5fe27..9656513 100644 +--- a/arch/mips/mm/mmap.c ++++ b/arch/mips/mm/mmap.c +@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + struct vm_area_struct *vma; + unsigned long addr = addr0; + int do_color_align; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + if (unlikely(len > TASK_SIZE)) +@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + do_color_align = 1; + + /* requesting a specific address */ ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); +@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len, offset)) + return addr; + } + + info.length = len; + info.align_mask = do_color_align ? 
(PAGE_MASK & shm_align_mask) : 0; + info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; + + if (dir == DOWN) { + info.flags = VM_UNMAPPED_AREA_TOPDOWN; +@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + { + unsigned long random_factor = 0UL; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (current->flags & PF_RANDOMIZE) { + random_factor = get_random_int(); + random_factor = random_factor << PAGE_SHIFT; +@@ -157,42 +167,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(random_factor); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } + } + +-static inline unsigned long brk_rnd(void) +-{ +- unsigned long rnd = get_random_int(); +- +- rnd = rnd << PAGE_SHIFT; +- /* 8MB for 32bit, 256MB for 64bit */ +- if (TASK_IS_32BIT_ADDR) +- rnd = rnd & 0x7ffffful; +- else +- rnd = rnd & 0xffffffful; +- +- return rnd; +-} +- +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long base = mm->brk; +- unsigned long ret; +- +- ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (ret < mm->brk) +- return mm->brk; +- +- return ret; +-} +- + int __virt_addr_valid(const volatile void *kaddr) + { + return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); +diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h +index 967d144..db12197 100644 +--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h ++++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h +@@ -11,12 +11,14 @@ + #ifndef _ASM_PROC_CACHE_H + #define _ASM_PROC_CACHE_H + ++#include <linux/const.h> ++ + /* L1 cache */ + + #define L1_CACHE_NWAYS 4 /* number of ways in caches */ + #define L1_CACHE_NENTRIES 256 /* number of entries in each way */ +-#define L1_CACHE_BYTES 16 /* bytes per entry */ + #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */ ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */ + #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */ + + #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */ +diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h +index bcb5df2..84fabd2 100644 +--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h ++++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h +@@ -16,13 +16,15 @@ + #ifndef _ASM_PROC_CACHE_H + #define _ASM_PROC_CACHE_H + ++#include <linux/const.h> ++ + /* + * L1 cache + */ + #define L1_CACHE_NWAYS 4 /* number of ways in caches */ + #define L1_CACHE_NENTRIES 128 /* number of entries in each way */ +-#define L1_CACHE_BYTES 32 /* bytes per entry */ + #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */ ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */ + #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */ + + #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */ +diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h +index 
4ce7a01..449202a 100644 +--- a/arch/openrisc/include/asm/cache.h ++++ b/arch/openrisc/include/asm/cache.h +@@ -19,11 +19,13 @@ + #ifndef __ASM_OPENRISC_CACHE_H + #define __ASM_OPENRISC_CACHE_H + ++#include <linux/const.h> ++ + /* FIXME: How can we replace these with values from the CPU... + * they shouldn't be hard-coded! + */ + +-#define L1_CACHE_BYTES 16 + #define L1_CACHE_SHIFT 4 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif /* __ASM_OPENRISC_CACHE_H */ +diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h +index f38e198..4179e38 100644 +--- a/arch/parisc/include/asm/atomic.h ++++ b/arch/parisc/include/asm/atomic.h +@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #endif /* !CONFIG_64BIT */ + + +diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h +index 47f11c7..3420df2 100644 +--- a/arch/parisc/include/asm/cache.h ++++ b/arch/parisc/include/asm/cache.h +@@ -5,6 +5,7 @@ + #ifndef __ARCH_PARISC_CACHE_H + #define __ARCH_PARISC_CACHE_H + ++#include <linux/const.h> + + /* + * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have +@@ -15,13 +16,13 @@ + * just ruin performance. + */ + #ifdef CONFIG_PA20 +-#define L1_CACHE_BYTES 64 + #define L1_CACHE_SHIFT 6 + #else +-#define L1_CACHE_BYTES 32 + #define L1_CACHE_SHIFT 5 + #endif + ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) ++ + #ifndef __ASSEMBLY__ + + #define SMP_CACHE_BYTES L1_CACHE_BYTES +diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h +index ad2b503..bdf1651 100644 +--- a/arch/parisc/include/asm/elf.h ++++ b/arch/parisc/include/asm/elf.h +@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */ + + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x10000UL ++ ++#define PAX_DELTA_MMAP_LEN 16 ++#define PAX_DELTA_STACK_LEN 16 ++#endif ++ + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. This could be done in user space, + but it's not easy, and we've already done it here. 
*/ +diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h +index fc987a1..6e068ef 100644 +--- a/arch/parisc/include/asm/pgalloc.h ++++ b/arch/parisc/include/asm/pgalloc.h +@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) + (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)); + } + ++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) ++{ ++ pgd_populate(mm, pgd, pmd); ++} ++ + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) + { + pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, +@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) + #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) + #define pmd_free(mm, x) do { } while (0) + #define pgd_populate(mm, pmd, pte) BUG() ++#define pgd_populate_kernel(mm, pmd, pte) BUG() + + #endif + +diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h +index 1e40d7f..a3eb445 100644 +--- a/arch/parisc/include/asm/pgtable.h ++++ b/arch/parisc/include/asm/pgtable.h +@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long); + #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) + #define PAGE_COPY PAGE_EXECREAD + #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) ++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) ++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) ++#else ++# define PAGE_SHARED_NOEXEC PAGE_SHARED ++# define PAGE_COPY_NOEXEC PAGE_COPY ++# define PAGE_READONLY_NOEXEC PAGE_READONLY ++#endif ++ + #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) + #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) + #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX) +diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h +index e0a8235..ce2f1e1 100644 +--- a/arch/parisc/include/asm/uaccess.h ++++ b/arch/parisc/include/asm/uaccess.h +@@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to, + const void __user *from, + unsigned long n) + { +- int sz = __compiletime_object_size(to); ++ size_t sz = __compiletime_object_size(to); + int ret = -EFAULT; + +- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n)) ++ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n)) + ret = __copy_from_user(to, from, n); + else + copy_from_user_overflow(); +diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c +index 2a625fb..9908930 100644 +--- a/arch/parisc/kernel/module.c ++++ b/arch/parisc/kernel/module.c +@@ -98,16 +98,38 @@ + + /* three functions to determine where in the module core + * or init pieces the location is */ ++static inline int in_init_rx(struct module *me, void *loc) ++{ ++ return (loc >= me->module_init_rx && ++ loc < (me->module_init_rx + me->init_size_rx)); ++} ++ ++static inline int in_init_rw(struct module *me, void *loc) ++{ ++ return (loc >= me->module_init_rw && ++ loc < (me->module_init_rw + me->init_size_rw)); ++} ++ + static inline int in_init(struct module *me, void *loc) + { +- return (loc >= me->module_init && +- loc <= (me->module_init + me->init_size)); ++ return in_init_rx(me, 
loc) || in_init_rw(me, loc); ++} ++ ++static inline int in_core_rx(struct module *me, void *loc) ++{ ++ return (loc >= me->module_core_rx && ++ loc < (me->module_core_rx + me->core_size_rx)); ++} ++ ++static inline int in_core_rw(struct module *me, void *loc) ++{ ++ return (loc >= me->module_core_rw && ++ loc < (me->module_core_rw + me->core_size_rw)); + } + + static inline int in_core(struct module *me, void *loc) + { +- return (loc >= me->module_core && +- loc <= (me->module_core + me->core_size)); ++ return in_core_rx(me, loc) || in_core_rw(me, loc); + } + + static inline int in_local(struct module *me, void *loc) +@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr, + } + + /* align things a bit */ +- me->core_size = ALIGN(me->core_size, 16); +- me->arch.got_offset = me->core_size; +- me->core_size += gots * sizeof(struct got_entry); ++ me->core_size_rw = ALIGN(me->core_size_rw, 16); ++ me->arch.got_offset = me->core_size_rw; ++ me->core_size_rw += gots * sizeof(struct got_entry); + +- me->core_size = ALIGN(me->core_size, 16); +- me->arch.fdesc_offset = me->core_size; +- me->core_size += fdescs * sizeof(Elf_Fdesc); ++ me->core_size_rw = ALIGN(me->core_size_rw, 16); ++ me->arch.fdesc_offset = me->core_size_rw; ++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc); + + me->arch.got_max = gots; + me->arch.fdesc_max = fdescs; +@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) + + BUG_ON(value == 0); + +- got = me->module_core + me->arch.got_offset; ++ got = me->module_core_rw + me->arch.got_offset; + for (i = 0; got[i].addr; i++) + if (got[i].addr == value) + goto out; +@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) + #ifdef CONFIG_64BIT + static Elf_Addr get_fdesc(struct module *me, unsigned long value) + { +- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; ++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset; + + if (!value) { + printk(KERN_ERR "%s: zero OPD requested!\n", me->name); +@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value) + + /* Create new one */ + fdesc->addr = value; +- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; ++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; + return (Elf_Addr)fdesc; + } + #endif /* CONFIG_64BIT */ +@@ -843,7 +865,7 @@ register_unwind_table(struct module *me, + + table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; + end = table + sechdrs[me->arch.unwind_section].sh_size; +- gp = (Elf_Addr)me->module_core + me->arch.got_offset; ++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; + + DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", + me->arch.unwind_section, table, end, gp); +diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c +index 5dfd248..64914ac 100644 +--- a/arch/parisc/kernel/sys_parisc.c ++++ b/arch/parisc/kernel/sys_parisc.c +@@ -33,9 +33,11 @@ + #include <linux/utsname.h> + #include <linux/personality.h> + +-static unsigned long get_unshared_area(unsigned long addr, unsigned long len) ++static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len, ++ unsigned long flags) + { + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); + + info.flags = 0; + info.length = len; +@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len) + 
info.high_limit = TASK_SIZE; + info.align_mask = 0; + info.align_offset = 0; ++ info.threadstack_offset = offset; + return vm_unmapped_area(&info); + } + +@@ -61,10 +64,11 @@ static int get_offset(struct address_space *mapping) + return (unsigned long) mapping >> 8; + } + +-static unsigned long get_shared_area(struct address_space *mapping, +- unsigned long addr, unsigned long len, unsigned long pgoff) ++static unsigned long get_shared_area(struct file *filp, struct address_space *mapping, ++ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) + { + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); + + info.flags = 0; + info.length = len; +@@ -72,6 +76,7 @@ static unsigned long get_shared_area(struct address_space *mapping, + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & (SHMLBA - 1); + info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT; ++ info.threadstack_offset = offset; + return vm_unmapped_area(&info); + } + +@@ -86,15 +91,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + return -EINVAL; + return addr; + } +- if (!addr) ++ if (!addr) { + addr = TASK_UNMAPPED_BASE; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) ++ addr += current->mm->delta_mmap; ++#endif ++ ++ } ++ + if (filp) { +- addr = get_shared_area(filp->f_mapping, addr, len, pgoff); ++ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags); + } else if(flags & MAP_SHARED) { +- addr = get_shared_area(NULL, addr, len, pgoff); ++ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags); + } else { +- addr = get_unshared_area(addr, len); ++ addr = get_unshared_area(filp, addr, len, flags); + } + return addr; + } +diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c +index c6ae9f5..e9c3cf4 100644 +--- a/arch/parisc/kernel/traps.c ++++ b/arch/parisc/kernel/traps.c +@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm,regs->iaoq[0]); +- if (vma && (regs->iaoq[0] >= vma->vm_start) +- && (vma->vm_flags & VM_EXEC)) { +- ++ if (vma && (regs->iaoq[0] >= vma->vm_start)) { + fault_address = regs->iaoq[0]; + fault_space = regs->iasq[0]; + +diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c +index f247a34..dc0f219 100644 +--- a/arch/parisc/mm/fault.c ++++ b/arch/parisc/mm/fault.c +@@ -15,6 +15,7 @@ + #include <linux/sched.h> + #include <linux/interrupt.h> + #include <linux/module.h> ++#include <linux/unistd.h> + + #include <asm/uaccess.h> + #include <asm/traps.h> +@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data); + static unsigned long + parisc_acctyp(unsigned long code, unsigned int inst) + { +- if (code == 6 || code == 16) ++ if (code == 6 || code == 7 || code == 16) + return VM_EXEC; + + switch (inst & 0xf0000000) { +@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst) + } + #endif + ++#ifdef CONFIG_PAX_PAGEEXEC ++/* ++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when rt_sigreturn trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: unpatched PLT emulation */ ++ unsigned int bl, depwi; ++ ++ err = get_user(bl, (unsigned int 
*)instruction_pointer(regs)); ++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4)); ++ ++ if (err) ++ break; ++ ++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) { ++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12; ++ ++ err = get_user(ldw, (unsigned int *)addr); ++ err |= get_user(bv, (unsigned int *)(addr+4)); ++ err |= get_user(ldw2, (unsigned int *)(addr+8)); ++ ++ if (err) ++ break; ++ ++ if (ldw == 0x0E801096U && ++ bv == 0xEAC0C000U && ++ ldw2 == 0x0E881095U) ++ { ++ unsigned int resolver, map; ++ ++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8)); ++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12)); ++ if (err) ++ break; ++ ++ regs->gr[20] = instruction_pointer(regs)+8; ++ regs->gr[21] = map; ++ regs->gr[22] = resolver; ++ regs->iaoq[0] = resolver | 3UL; ++ regs->iaoq[1] = regs->iaoq[0] + 4; ++ return 3; ++ } ++ } ++ } while (0); ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ ++#ifndef CONFIG_PAX_EMUSIGRT ++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) ++ return 1; ++#endif ++ ++ do { /* PaX: rt_sigreturn emulation */ ++ unsigned int ldi1, ldi2, bel, nop; ++ ++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs)); ++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4)); ++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8)); ++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12)); ++ ++ if (err) ++ break; ++ ++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) && ++ ldi2 == 0x3414015AU && ++ bel == 0xE4008200U && ++ nop == 0x08000240U) ++ { ++ regs->gr[25] = (ldi1 & 2) >> 1; ++ regs->gr[20] = __NR_rt_sigreturn; ++ regs->gr[31] = regs->iaoq[1] + 16; ++ regs->sr[0] = regs->iasq[1]; ++ regs->iaoq[0] = 0x100UL; ++ regs->iaoq[1] = regs->iaoq[0] + 4; ++ regs->iasq[0] = regs->sr[2]; ++ regs->iasq[1] = regs->sr[2]; ++ return 2; ++ } ++ } while (0); ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? 
"); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + int fixup_exception(struct pt_regs *regs) + { + const struct exception_table_entry *fix; +@@ -194,8 +305,33 @@ good_area: + + acc_type = parisc_acctyp(code,regs->iir); + +- if ((vma->vm_flags & acc_type) != acc_type) ++ if ((vma->vm_flags & acc_type) != acc_type) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) && ++ (address & ~3UL) == instruction_pointer(regs)) ++ { ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 3: ++ return; ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ case 2: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + goto bad_area; ++ } + + /* + * If for any reason at all we couldn't handle the fault, make +diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h +index e3b1d41..8e81edf 100644 +--- a/arch/powerpc/include/asm/atomic.h ++++ b/arch/powerpc/include/asm/atomic.h +@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v) + return t1; + } + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #endif /* __powerpc64__ */ + + #endif /* __KERNEL__ */ +diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h +index 9e495c9..b6878e5 100644 +--- a/arch/powerpc/include/asm/cache.h ++++ b/arch/powerpc/include/asm/cache.h +@@ -3,6 +3,7 @@ + + #ifdef __KERNEL__ + ++#include <linux/const.h> + + /* bytes per L1 cache line */ + #if defined(CONFIG_8xx) || defined(CONFIG_403GCX) +@@ -22,7 +23,7 @@ + #define L1_CACHE_SHIFT 7 + #endif + +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define SMP_CACHE_BYTES L1_CACHE_BYTES + +diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h +index ac9790f..6d30741 100644 +--- a/arch/powerpc/include/asm/elf.h ++++ b/arch/powerpc/include/asm/elf.h +@@ -28,8 +28,19 @@ + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-extern unsigned long randomize_et_dyn(unsigned long base); +-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000)) ++#define ELF_ET_DYN_BASE (0x20000000) ++ ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (0x10000000UL) ++ ++#ifdef __powerpc64__ ++#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28) ++#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 
16 : 28) ++#else ++#define PAX_DELTA_MMAP_LEN 15 ++#define PAX_DELTA_STACK_LEN 15 ++#endif ++#endif + + /* + * Our registers are always unsigned longs, whether we're a 32 bit +@@ -122,10 +133,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, + (0x7ff >> (PAGE_SHIFT - 12)) : \ + (0x3ffff >> (PAGE_SHIFT - 12))) + +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- +- + #ifdef CONFIG_SPU_BASE + /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */ + #define NT_SPU 1 +diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h +index 8196e9c..d83a9f3 100644 +--- a/arch/powerpc/include/asm/exec.h ++++ b/arch/powerpc/include/asm/exec.h +@@ -4,6 +4,6 @@ + #ifndef _ASM_POWERPC_EXEC_H + #define _ASM_POWERPC_EXEC_H + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + + #endif /* _ASM_POWERPC_EXEC_H */ +diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h +index 5acabbd..7ea14fa 100644 +--- a/arch/powerpc/include/asm/kmap_types.h ++++ b/arch/powerpc/include/asm/kmap_types.h +@@ -10,7 +10,7 @@ + * 2 of the License, or (at your option) any later version. + */ + +-#define KM_TYPE_NR 16 ++#define KM_TYPE_NR 17 + + #endif /* __KERNEL__ */ + #endif /* _ASM_POWERPC_KMAP_TYPES_H */ +diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h +index 8565c25..2865190 100644 +--- a/arch/powerpc/include/asm/mman.h ++++ b/arch/powerpc/include/asm/mman.h +@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot) + } + #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot) + +-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) ++static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags) + { + return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0); + } +diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h +index f072e97..b436dee 100644 +--- a/arch/powerpc/include/asm/page.h ++++ b/arch/powerpc/include/asm/page.h +@@ -220,8 +220,9 @@ extern long long virt_phys_offset; + * and needs to be executable. This means the whole heap ends + * up being executable. + */ +-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ +- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) ++#define VM_DATA_DEFAULT_FLAGS32 \ ++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ ++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + + #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +@@ -249,6 +250,9 @@ extern long long virt_phys_offset; + #define is_kernel_addr(x) ((x) >= PAGE_OFFSET) + #endif + ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++ + /* + * Use the top bit of the higher-level page table entries to indicate whether + * the entries we point to contain hugepages. This works because we know that +diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h +index cd915d6..c10cee8 100644 +--- a/arch/powerpc/include/asm/page_64.h ++++ b/arch/powerpc/include/asm/page_64.h +@@ -154,15 +154,18 @@ do { \ + * stack by default, so in the absence of a PT_GNU_STACK program header + * we turn execute permission off. 
+ */ +-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ +- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) ++#define VM_STACK_DEFAULT_FLAGS32 \ ++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ ++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + + #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + ++#ifndef CONFIG_PAX_PAGEEXEC + #define VM_STACK_DEFAULT_FLAGS \ + (is_32bit_task() ? \ + VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) ++#endif + + #include <asm-generic/getorder.h> + +diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h +index 292725c..f87ae14 100644 +--- a/arch/powerpc/include/asm/pgalloc-64.h ++++ b/arch/powerpc/include/asm/pgalloc-64.h +@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) + #ifndef CONFIG_PPC_64K_PAGES + + #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD) ++#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD)) + + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) + { +@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) + pud_set(pud, (unsigned long)pmd); + } + ++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) ++{ ++ pud_populate(mm, pud, pmd); ++} ++ + #define pmd_populate(mm, pmd, pte_page) \ + pmd_populate_kernel(mm, pmd, page_address(pte_page)) + #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte)) +@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) + #else /* CONFIG_PPC_64K_PAGES */ + + #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd) ++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd)) + + static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h +index a9cbd3b..3b67efa 100644 +--- a/arch/powerpc/include/asm/pgtable.h ++++ b/arch/powerpc/include/asm/pgtable.h +@@ -2,6 +2,7 @@ + #define _ASM_POWERPC_PGTABLE_H + #ifdef __KERNEL__ + ++#include <linux/const.h> + #ifndef __ASSEMBLY__ + #include <asm/processor.h> /* For TASK_SIZE */ + #include <asm/mmu.h> +diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h +index 4aad413..85d86bf 100644 +--- a/arch/powerpc/include/asm/pte-hash32.h ++++ b/arch/powerpc/include/asm/pte-hash32.h +@@ -21,6 +21,7 @@ + #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ + #define _PAGE_USER 0x004 /* usermode access allowed */ + #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ ++#define _PAGE_EXEC _PAGE_GUARDED + #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ + #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ + #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h +index 3b097a8..8f8c774 100644 +--- a/arch/powerpc/include/asm/reg.h ++++ b/arch/powerpc/include/asm/reg.h +@@ -234,6 +234,7 @@ + #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ + #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ + #define DSISR_NOHPTE 0x40000000 /* no translation found */ ++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */ + #define DSISR_PROTFAULT 0x08000000 /* protection fault */ + #define DSISR_ISSTORE 
0x02000000 /* access was a store */ + #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ +diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h +index 195ce2a..ab5c614 100644 +--- a/arch/powerpc/include/asm/smp.h ++++ b/arch/powerpc/include/asm/smp.h +@@ -50,7 +50,7 @@ struct smp_ops_t { + int (*cpu_disable)(void); + void (*cpu_die)(unsigned int nr); + int (*cpu_bootable)(unsigned int nr); +-}; ++} __no_const; + + extern void smp_send_debugger_break(void); + extern void start_secondary_resume(void); +diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h +index 406b7b9..af63426 100644 +--- a/arch/powerpc/include/asm/thread_info.h ++++ b/arch/powerpc/include/asm/thread_info.h +@@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void) + #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ + #define TIF_SINGLESTEP 8 /* singlestepping active */ +-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */ + #define TIF_SECCOMP 10 /* secure computing */ + #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ + #define TIF_NOERROR 12 /* Force successful syscall return */ +@@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void) + #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ + #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation + for stack store? */ ++#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ ++/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */ ++#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */ + + /* as above, but as bit values */ + #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) +@@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void) + #define _TIF_UPROBE (1<<TIF_UPROBE) + #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) + #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID) + #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ +- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) ++ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ ++ _TIF_GRSEC_SETXID) + + #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ + _TIF_NOTIFY_RESUME | _TIF_UPROBE) +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h +index 4db4959..aba5c41 100644 +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -318,52 +318,6 @@ do { \ + extern unsigned long __copy_tofrom_user(void __user *to, + const void __user *from, unsigned long size); + +-#ifndef __powerpc64__ +- +-static inline unsigned long copy_from_user(void *to, +- const void __user *from, unsigned long n) +-{ +- unsigned long over; +- +- if (access_ok(VERIFY_READ, from, n)) +- return __copy_tofrom_user((__force void __user *)to, from, n); +- if ((unsigned long)from < TASK_SIZE) { +- over = (unsigned long)from + n - TASK_SIZE; +- return __copy_tofrom_user((__force void __user *)to, from, +- n - over) + over; +- } +- return n; +-} +- +-static inline unsigned long copy_to_user(void __user *to, +- const void *from, unsigned long n) +-{ +- unsigned long over; +- +- if (access_ok(VERIFY_WRITE, to, n)) +- return __copy_tofrom_user(to, (__force void __user *)from, n); +- if ((unsigned long)to < TASK_SIZE) { +- over = (unsigned long)to + n - TASK_SIZE; +- return __copy_tofrom_user(to, 
(__force void __user *)from, +- n - over) + over; +- } +- return n; +-} +- +-#else /* __powerpc64__ */ +- +-#define __copy_in_user(to, from, size) \ +- __copy_tofrom_user((to), (from), (size)) +- +-extern unsigned long copy_from_user(void *to, const void __user *from, +- unsigned long n); +-extern unsigned long copy_to_user(void __user *to, const void *from, +- unsigned long n); +-extern unsigned long copy_in_user(void __user *to, const void __user *from, +- unsigned long n); +- +-#endif /* __powerpc64__ */ +- + static inline unsigned long __copy_from_user_inatomic(void *to, + const void __user *from, unsigned long n) + { +@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to, + if (ret == 0) + return 0; + } ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ + return __copy_tofrom_user((__force void __user *)to, from, n); + } + +@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to, + if (ret == 0) + return 0; + } ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ + return __copy_tofrom_user(to, (__force const void __user *)from, n); + } + +@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to, + return __copy_to_user_inatomic(to, from, size); + } + ++#ifndef __powerpc64__ ++ ++static inline unsigned long __must_check copy_from_user(void *to, ++ const void __user *from, unsigned long n) ++{ ++ unsigned long over; ++ ++ if ((long)n < 0) ++ return n; ++ ++ if (access_ok(VERIFY_READ, from, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ return __copy_tofrom_user((__force void __user *)to, from, n); ++ } ++ if ((unsigned long)from < TASK_SIZE) { ++ over = (unsigned long)from + n - TASK_SIZE; ++ if (!__builtin_constant_p(n - over)) ++ check_object_size(to, n - over, false); ++ return __copy_tofrom_user((__force void __user *)to, from, ++ n - over) + over; ++ } ++ return n; ++} ++ ++static inline unsigned long __must_check copy_to_user(void __user *to, ++ const void *from, unsigned long n) ++{ ++ unsigned long over; ++ ++ if ((long)n < 0) ++ return n; ++ ++ if (access_ok(VERIFY_WRITE, to, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ return __copy_tofrom_user(to, (__force void __user *)from, n); ++ } ++ if ((unsigned long)to < TASK_SIZE) { ++ over = (unsigned long)to + n - TASK_SIZE; ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n - over, true); ++ return __copy_tofrom_user(to, (__force void __user *)from, ++ n - over) + over; ++ } ++ return n; ++} ++ ++#else /* __powerpc64__ */ ++ ++#define __copy_in_user(to, from, size) \ ++ __copy_tofrom_user((to), (from), (size)) ++ ++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ if ((long)n < 0 || n > INT_MAX) ++ return n; ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ ++ if (likely(access_ok(VERIFY_READ, from, n))) ++ n = __copy_from_user(to, from, n); ++ else ++ memset(to, 0, n); ++ return n; ++} ++ ++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ if ((long)n < 0 || n > INT_MAX) ++ return n; ++ ++ if (likely(access_ok(VERIFY_WRITE, to, n))) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ n = __copy_to_user(to, from, n); ++ } ++ return n; ++} ++ ++extern unsigned long copy_in_user(void __user *to, const void __user *from, ++ unsigned long n); ++ ++#endif /* 
__powerpc64__ */ ++ + extern unsigned long __clear_user(void __user *addr, unsigned long size); + + static inline unsigned long clear_user(void __user *addr, unsigned long size) +diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S +index ae54553..cf2184d 100644 +--- a/arch/powerpc/kernel/exceptions-64e.S ++++ b/arch/powerpc/kernel/exceptions-64e.S +@@ -716,6 +716,7 @@ storage_fault_common: + std r14,_DAR(r1) + std r15,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD ++ bl .save_nvgprs + mr r4,r14 + mr r5,r15 + ld r14,PACA_EXGEN+EX_R14(r13) +@@ -724,8 +725,7 @@ storage_fault_common: + cmpdi r3,0 + bne- 1f + b .ret_from_except_lite +-1: bl .save_nvgprs +- mr r5,r3 ++1: mr r5,r3 + addi r3,r1,STACK_FRAME_OVERHEAD + ld r4,_DAR(r1) + bl .bad_page_fault +diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S +index 644378e..b6f2c26 100644 +--- a/arch/powerpc/kernel/exceptions-64s.S ++++ b/arch/powerpc/kernel/exceptions-64s.S +@@ -1390,10 +1390,10 @@ handle_page_fault: + 11: ld r4,_DAR(r1) + ld r5,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD ++ bl .save_nvgprs + bl .do_page_fault + cmpdi r3,0 + beq+ 12f +- bl .save_nvgprs + mr r5,r3 + addi r3,r1,STACK_FRAME_OVERHEAD + lwz r4,_DAR(r1) +diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c +index 2e3200c..72095ce 100644 +--- a/arch/powerpc/kernel/module_32.c ++++ b/arch/powerpc/kernel/module_32.c +@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, + me->arch.core_plt_section = i; + } + if (!me->arch.core_plt_section || !me->arch.init_plt_section) { +- printk("Module doesn't contain .plt or .init.plt sections.\n"); ++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name); + return -ENOEXEC; + } + +@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location, + + DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); + /* Init, or core PLT? */ +- if (location >= mod->module_core +- && location < mod->module_core + mod->core_size) ++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) || ++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw)) + entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; +- else ++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) || ++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw)) + entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; ++ else { ++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name); ++ return ~0UL; ++ } + + /* Find this entry, or if that fails, the next avail. 
entry */ + while (entry->jump[0]) { +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c +index 0d86c8a..df4c5f2 100644 +--- a/arch/powerpc/kernel/process.c ++++ b/arch/powerpc/kernel/process.c +@@ -871,8 +871,8 @@ void show_regs(struct pt_regs * regs) + * Lookup NIP late so we have the best change of getting the + * above info out without failing + */ +- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); +- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); ++ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip); ++ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link); + #endif + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM + printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch); +@@ -1331,10 +1331,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) + newsp = stack[0]; + ip = stack[STACK_FRAME_LR_SAVE]; + if (!firstframe || ip != lr) { +- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); ++ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip); + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + if ((ip == rth || ip == mrth) && curr_frame >= 0) { +- printk(" (%pS)", ++ printk(" (%pA)", + (void *)current->ret_stack[curr_frame].ret); + curr_frame--; + } +@@ -1354,7 +1354,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) + struct pt_regs *regs = (struct pt_regs *) + (sp + STACK_FRAME_OVERHEAD); + lr = regs->link; +- printk("--- Exception: %lx at %pS\n LR = %pS\n", ++ printk("--- Exception: %lx at %pA\n LR = %pA\n", + regs->trap, (void *)regs->nip, (void *)lr); + firstframe = 1; + } +@@ -1396,58 +1396,3 @@ void notrace __ppc64_runlatch_off(void) + mtspr(SPRN_CTRLT, ctrl); + } + #endif /* CONFIG_PPC64 */ +- +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() & ~PAGE_MASK; +- return sp & ~0xf; +-} +- +-static inline unsigned long brk_rnd(void) +-{ +- unsigned long rnd = 0; +- +- /* 8MB for 32bit, 1GB for 64bit */ +- if (is_32bit_task()) +- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); +- else +- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); +- +- return rnd << PAGE_SHIFT; +-} +- +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long base = mm->brk; +- unsigned long ret; +- +-#ifdef CONFIG_PPC_STD_MMU_64 +- /* +- * If we are using 1TB segments and we are allowed to randomise +- * the heap, we can put it above 1TB so it is backed by a 1TB +- * segment. Otherwise the heap will be in the bottom 1TB +- * which always uses 256MB segments and this may result in a +- * performance penalty. +- */ +- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) +- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); +-#endif +- +- ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (ret < mm->brk) +- return mm->brk; +- +- return ret; +-} +- +-unsigned long randomize_et_dyn(unsigned long base) +-{ +- unsigned long ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (ret < base) +- return base; +- +- return ret; +-} +diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c +index f9b30c6..d72e7a3 100644 +--- a/arch/powerpc/kernel/ptrace.c ++++ b/arch/powerpc/kernel/ptrace.c +@@ -1771,6 +1771,10 @@ long arch_ptrace(struct task_struct *child, long request, + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + /* + * We must return the syscall number to actually look up in the table. + * This can be -1L to skip running any syscall at all. 
+@@ -1781,6 +1785,11 @@ long do_syscall_trace_enter(struct pt_regs *regs) + + secure_computing_strict(regs->gpr[0]); + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + if (test_thread_flag(TIF_SYSCALL_TRACE) && + tracehook_report_syscall_entry(regs)) + /* +@@ -1815,6 +1824,11 @@ void do_syscall_trace_leave(struct pt_regs *regs) + { + int step; + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + audit_syscall_exit(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) +diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c +index 201385c..0f01828 100644 +--- a/arch/powerpc/kernel/signal_32.c ++++ b/arch/powerpc/kernel/signal_32.c +@@ -976,7 +976,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, + /* Save user registers on the stack */ + frame = &rt_sf->uc.uc_mcontext; + addr = frame; +- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { ++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { + sigret = 0; + tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp; + } else { +diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c +index 3459473..2d40783 100644 +--- a/arch/powerpc/kernel/signal_64.c ++++ b/arch/powerpc/kernel/signal_64.c +@@ -749,7 +749,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, + #endif + + /* Set up to return from userspace. */ +- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { ++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { + regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp; + } else { + err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); +diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c +index 3ce1f86..c30e629 100644 +--- a/arch/powerpc/kernel/sysfs.c ++++ b/arch/powerpc/kernel/sysfs.c +@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, + return NOTIFY_OK; + } + +-static struct notifier_block __cpuinitdata sysfs_cpu_nb = { ++static struct notifier_block sysfs_cpu_nb = { + .notifier_call = sysfs_cpu_notify, + }; + +diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c +index bf33ace..e836d8b 100644 +--- a/arch/powerpc/kernel/traps.c ++++ b/arch/powerpc/kernel/traps.c +@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs) + return flags; + } + ++extern void gr_handle_kernel_exploit(void); ++ + static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, + int signr) + { +@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); ++ ++ gr_handle_kernel_exploit(); ++ + do_exit(signr); + } + +diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c +index 1b2076f..835e4be 100644 +--- a/arch/powerpc/kernel/vdso.c ++++ b/arch/powerpc/kernel/vdso.c +@@ -34,6 +34,7 @@ + #include <asm/firmware.h> + #include <asm/vdso.h> + #include <asm/vdso_datapage.h> ++#include <asm/mman.h> + + #include "setup.h" + +@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + vdso_base = VDSO32_MBASE; + #endif + +- current->mm->context.vdso_base = 0; ++ current->mm->context.vdso_base = ~0UL; + + /* vDSO has a problem and was 
disabled, just don't "enable" it for the + * process +@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + vdso_base = get_unmapped_area(NULL, vdso_base, + (vdso_pages << PAGE_SHIFT) + + ((VDSO_ALIGNMENT - 1) & PAGE_MASK), +- 0, 0); ++ 0, MAP_PRIVATE | MAP_EXECUTABLE); + if (IS_ERR_VALUE(vdso_base)) { + rc = vdso_base; + goto fail_mmapsem; +diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c +index 5eea6f3..5d10396 100644 +--- a/arch/powerpc/lib/usercopy_64.c ++++ b/arch/powerpc/lib/usercopy_64.c +@@ -9,22 +9,6 @@ + #include <linux/module.h> + #include <asm/uaccess.h> + +-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) +-{ +- if (likely(access_ok(VERIFY_READ, from, n))) +- n = __copy_from_user(to, from, n); +- else +- memset(to, 0, n); +- return n; +-} +- +-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) +-{ +- if (likely(access_ok(VERIFY_WRITE, to, n))) +- n = __copy_to_user(to, from, n); +- return n; +-} +- + unsigned long copy_in_user(void __user *to, const void __user *from, + unsigned long n) + { +@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from, + return n; + } + +-EXPORT_SYMBOL(copy_from_user); +-EXPORT_SYMBOL(copy_to_user); + EXPORT_SYMBOL(copy_in_user); + +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c +index 229951f..cdeca42 100644 +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -32,6 +32,10 @@ + #include <linux/perf_event.h> + #include <linux/magic.h> + #include <linux/ratelimit.h> ++#include <linux/slab.h> ++#include <linux/pagemap.h> ++#include <linux/compiler.h> ++#include <linux/unistd.h> + + #include <asm/firmware.h> + #include <asm/page.h> +@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs) + } + #endif + ++#ifdef CONFIG_PAX_PAGEEXEC ++/* ++ * PaX: decide what to do with offenders (regs->nip = fault address) ++ * ++ * returns 1 when task should be killed ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int __user *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * Check whether the instruction at regs->nip is a store using + * an update addressing form which will update r1. +@@ -213,7 +244,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, + * indicate errors in DSISR but can validly be set in SRR1. + */ + if (trap == 0x400) +- error_code &= 0x48200000; ++ error_code &= 0x58200000; + else + is_write = error_code & DSISR_ISSTORE; + #else +@@ -364,7 +395,7 @@ good_area: + * "undefined". Of those that can be set, this is the only + * one which seems bad. + */ +- if (error_code & 0x10000000) ++ if (error_code & DSISR_GUARDED) + /* Guarded storage error. */ + goto bad_area; + #endif /* CONFIG_8xx */ +@@ -379,7 +410,7 @@ good_area: + * processors use the same I/D cache coherency mechanism + * as embedded. 
+ */ +- if (error_code & DSISR_PROTFAULT) ++ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED)) + goto bad_area; + #endif /* CONFIG_PPC_STD_MMU */ + +@@ -462,6 +493,23 @@ bad_area: + bad_area_nosemaphore: + /* User mode accesses cause a SIGSEGV */ + if (user_mode(regs)) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) { ++#ifdef CONFIG_PPC_STD_MMU ++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) { ++#else ++ if (is_exec && regs->nip == address) { ++#endif ++ switch (pax_handle_fetch_fault(regs)) { ++ } ++ ++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]); ++ do_group_exit(SIGKILL); ++ } ++ } ++#endif ++ + _exception(SIGSEGV, regs, code, address); + return 0; + } +diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c +index 67a42ed..cd463e0 100644 +--- a/arch/powerpc/mm/mmap_64.c ++++ b/arch/powerpc/mm/mmap_64.c +@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void) + { + unsigned long rnd = 0; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (current->flags & PF_RANDOMIZE) { + /* 8MB for 32bit, 1GB for 64bit */ + if (is_32bit_task()) +@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + */ + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c +index e779642..e5bb889 100644 +--- a/arch/powerpc/mm/mmu_context_nohash.c ++++ b/arch/powerpc/mm/mmu_context_nohash.c +@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self, + return NOTIFY_OK; + } + +-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = { ++static struct notifier_block mmu_context_cpu_nb = { + .notifier_call = mmu_context_cpu_notify, + }; + +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c +index 6a252c4..3024d81 100644 +--- a/arch/powerpc/mm/numa.c ++++ b/arch/powerpc/mm/numa.c +@@ -932,7 +932,7 @@ static void __init *careful_zallocation(int nid, unsigned long size, + return ret; + } + +-static struct notifier_block __cpuinitdata ppc64_numa_nb = { ++static struct notifier_block ppc64_numa_nb = { + .notifier_call = cpu_numa_callback, + .priority = 1 /* Must run before sched domains notifier. 
*/ + }; +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c +index cf9dada..241529f 100644 +--- a/arch/powerpc/mm/slice.c ++++ b/arch/powerpc/mm/slice.c +@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, + if ((mm->task_size - len) < addr) + return 0; + vma = find_vma(mm, addr); +- return (!vma || (addr + len) <= vma->vm_start); ++ return check_heap_stack_gap(vma, addr, len, 0); + } + + static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) +@@ -272,7 +272,7 @@ full_search: + addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT); + continue; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len, 0)) { + /* + * Remember the place where we stopped the search: + */ +@@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, + } + } + +- addr = mm->mmap_base; +- while (addr > len) { ++ if (mm->mmap_base < len) ++ addr = -ENOMEM; ++ else ++ addr = mm->mmap_base - len; ++ ++ while (!IS_ERR_VALUE(addr)) { + /* Go down by chunk size */ +- addr = _ALIGN_DOWN(addr - len, 1ul << pshift); ++ addr = _ALIGN_DOWN(addr, 1ul << pshift); + + /* Check for hit with different page size */ + mask = slice_range_to_mask(addr, len); +@@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, + * return with success: + */ + vma = find_vma(mm, addr); +- if (!vma || (addr + len) <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len, 0)) { + /* remember the address as a hint for next time */ + if (use_cache) + mm->free_area_cache = addr; +@@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ +- addr = vma->vm_start; ++ addr = skip_heap_stack_gap(vma, len, 0); + } + + /* +@@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, + if (fixed && addr > (mm->task_size - len)) + return -EINVAL; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP)) ++ addr = 0; ++#endif ++ + /* If hint, make sure it matches our alignment restrictions */ + if (!fixed && addr) { + addr = _ALIGN_UP(addr, 1ul << pshift); +diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c +index 68c57d3..1fdcfb2 100644 +--- a/arch/powerpc/platforms/cell/spufs/file.c ++++ b/arch/powerpc/platforms/cell/spufs/file.c +@@ -281,9 +281,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + return VM_FAULT_NOPAGE; + } + +-static int spufs_mem_mmap_access(struct vm_area_struct *vma, ++static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma, + unsigned long address, +- void *buf, int len, int write) ++ void *buf, size_t len, int write) + { + struct spu_context *ctx = vma->vm_file->private_data; + unsigned long offset = address - vma->vm_start; +diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c +index bdb738a..49c9f95 100644 +--- a/arch/powerpc/platforms/powermac/smp.c ++++ b/arch/powerpc/platforms/powermac/smp.c +@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self, + return NOTIFY_OK; + } + +-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { ++static struct notifier_block smp_core99_cpu_nb = { + .notifier_call = smp_core99_cpu_notify, + }; + #endif /* CONFIG_HOTPLUG_CPU */ +diff --git a/arch/s390/include/asm/atomic.h 
b/arch/s390/include/asm/atomic.h +index c797832..ce575c8 100644 +--- a/arch/s390/include/asm/atomic.h ++++ b/arch/s390/include/asm/atomic.h +@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) + #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #define smp_mb__before_atomic_dec() smp_mb() + #define smp_mb__after_atomic_dec() smp_mb() + #define smp_mb__before_atomic_inc() smp_mb() +diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h +index 4d7ccac..d03d0ad 100644 +--- a/arch/s390/include/asm/cache.h ++++ b/arch/s390/include/asm/cache.h +@@ -9,8 +9,10 @@ + #ifndef __ARCH_S390_CACHE_H + #define __ARCH_S390_CACHE_H + +-#define L1_CACHE_BYTES 256 ++#include <linux/const.h> ++ + #define L1_CACHE_SHIFT 8 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + #define NET_SKB_PAD 32 + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) +diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h +index 1bfdf24..9c9ab2e 100644 +--- a/arch/s390/include/asm/elf.h ++++ b/arch/s390/include/asm/elf.h +@@ -160,8 +160,14 @@ extern unsigned int vdso_enabled; + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-extern unsigned long randomize_et_dyn(unsigned long base); +-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2)) ++#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2) ++ ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL) ++ ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26) ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26) ++#endif + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. 
*/ +@@ -207,9 +213,6 @@ struct linux_binprm; + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 + int arch_setup_additional_pages(struct linux_binprm *, int); + +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + void *fill_cpu_elf_notes(void *ptr, struct save_area *sa); + + #endif +diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h +index c4a93d6..4d2a9b4 100644 +--- a/arch/s390/include/asm/exec.h ++++ b/arch/s390/include/asm/exec.h +@@ -7,6 +7,6 @@ + #ifndef __ASM_EXEC_H + #define __ASM_EXEC_H + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + + #endif /* __ASM_EXEC_H */ +diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h +index 9c33ed4..e40cbef 100644 +--- a/arch/s390/include/asm/uaccess.h ++++ b/arch/s390/include/asm/uaccess.h +@@ -252,6 +252,10 @@ static inline unsigned long __must_check + copy_to_user(void __user *to, const void *from, unsigned long n) + { + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ + return __copy_to_user(to, from, n); + } + +@@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n) + static inline unsigned long __must_check + __copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + if (__builtin_constant_p(n) && (n <= 256)) + return uaccess.copy_from_user_small(n, from, to); + else +@@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct") + static inline unsigned long __must_check + copy_from_user(void *to, const void __user *from, unsigned long n) + { +- unsigned int sz = __compiletime_object_size(to); ++ size_t sz = __compiletime_object_size(to); + + might_fault(); +- if (unlikely(sz != -1 && sz < n)) { ++ ++ if ((long)n < 0) ++ return n; ++ ++ if (unlikely(sz != (size_t)-1 && sz < n)) { + copy_from_user_overflow(); + return n; + } +diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c +index 7845e15..59c4353 100644 +--- a/arch/s390/kernel/module.c ++++ b/arch/s390/kernel/module.c +@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + + /* Increase core size by size of got & plt and set start + offsets for got and plt. 
*/ +- me->core_size = ALIGN(me->core_size, 4); +- me->arch.got_offset = me->core_size; +- me->core_size += me->arch.got_size; +- me->arch.plt_offset = me->core_size; +- me->core_size += me->arch.plt_size; ++ me->core_size_rw = ALIGN(me->core_size_rw, 4); ++ me->arch.got_offset = me->core_size_rw; ++ me->core_size_rw += me->arch.got_size; ++ me->arch.plt_offset = me->core_size_rx; ++ me->core_size_rx += me->arch.plt_size; + return 0; + } + +@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + if (info->got_initialized == 0) { + Elf_Addr *gotent; + +- gotent = me->module_core + me->arch.got_offset + ++ gotent = me->module_core_rw + me->arch.got_offset + + info->got_offset; + *gotent = val; + info->got_initialized = 1; +@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + rc = apply_rela_bits(loc, val, 0, 64, 0); + else if (r_type == R_390_GOTENT || + r_type == R_390_GOTPLTENT) { +- val += (Elf_Addr) me->module_core - loc; ++ val += (Elf_Addr) me->module_core_rw - loc; + rc = apply_rela_bits(loc, val, 1, 32, 1); + } + break; +@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ + if (info->plt_initialized == 0) { + unsigned int *ip; +- ip = me->module_core + me->arch.plt_offset + ++ ip = me->module_core_rx + me->arch.plt_offset + + info->plt_offset; + #ifndef CONFIG_64BIT + ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ +@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + val - loc + 0xffffUL < 0x1ffffeUL) || + (r_type == R_390_PLT32DBL && + val - loc + 0xffffffffULL < 0x1fffffffeULL))) +- val = (Elf_Addr) me->module_core + ++ val = (Elf_Addr) me->module_core_rx + + me->arch.plt_offset + + info->plt_offset; + val += rela->r_addend - loc; +@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + case R_390_GOTOFF32: /* 32 bit offset to GOT. */ + case R_390_GOTOFF64: /* 64 bit offset to GOT. */ + val = val + rela->r_addend - +- ((Elf_Addr) me->module_core + me->arch.got_offset); ++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset); + if (r_type == R_390_GOTOFF16) + rc = apply_rela_bits(loc, val, 0, 16, 0); + else if (r_type == R_390_GOTOFF32) +@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + break; + case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ + case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. 
*/ +- val = (Elf_Addr) me->module_core + me->arch.got_offset + ++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset + + rela->r_addend - loc; + if (r_type == R_390_GOTPC) + rc = apply_rela_bits(loc, val, 1, 32, 0); +diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c +index 536d645..4a5bd9e 100644 +--- a/arch/s390/kernel/process.c ++++ b/arch/s390/kernel/process.c +@@ -250,39 +250,3 @@ unsigned long get_wchan(struct task_struct *p) + } + return 0; + } +- +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() & ~PAGE_MASK; +- return sp & ~0xf; +-} +- +-static inline unsigned long brk_rnd(void) +-{ +- /* 8MB for 32bit, 1GB for 64bit */ +- if (is_32bit_task()) +- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; +- else +- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT; +-} +- +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); +- +- if (ret < mm->brk) +- return mm->brk; +- return ret; +-} +- +-unsigned long randomize_et_dyn(unsigned long base) +-{ +- unsigned long ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (!(current->flags & PF_RANDOMIZE)) +- return base; +- if (ret < base) +- return base; +- return ret; +-} +diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c +index 06bafec..2bca531 100644 +--- a/arch/s390/mm/mmap.c ++++ b/arch/s390/mm/mmap.c +@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + */ + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +@@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + */ + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = s390_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = s390_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h +index ae3d59f..f65f075 100644 +--- a/arch/score/include/asm/cache.h ++++ b/arch/score/include/asm/cache.h +@@ -1,7 +1,9 @@ + #ifndef _ASM_SCORE_CACHE_H + #define _ASM_SCORE_CACHE_H + ++#include <linux/const.h> ++ + #define L1_CACHE_SHIFT 4 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif /* _ASM_SCORE_CACHE_H */ +diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h +index f9f3cd5..58ff438 100644 +--- a/arch/score/include/asm/exec.h ++++ b/arch/score/include/asm/exec.h +@@ -1,6 +1,6 @@ + #ifndef _ASM_SCORE_EXEC_H + #define _ASM_SCORE_EXEC_H + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) (x) + + #endif /* 
_ASM_SCORE_EXEC_H */ +diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c +index 7956846..5f37677 100644 +--- a/arch/score/kernel/process.c ++++ b/arch/score/kernel/process.c +@@ -134,8 +134,3 @@ unsigned long get_wchan(struct task_struct *task) + + return task_pt_regs(task)->cp0_epc; + } +- +-unsigned long arch_align_stack(unsigned long sp) +-{ +- return sp; +-} +diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h +index ef9e555..331bd29 100644 +--- a/arch/sh/include/asm/cache.h ++++ b/arch/sh/include/asm/cache.h +@@ -9,10 +9,11 @@ + #define __ASM_SH_CACHE_H + #ifdef __KERNEL__ + ++#include <linux/const.h> + #include <linux/init.h> + #include <cpu/cache.h> + +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) + +diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c +index 03f2b55..b0270327 100644 +--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c ++++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c +@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) + return NOTIFY_OK; + } + +-static struct notifier_block __cpuinitdata shx3_cpu_notifier = { ++static struct notifier_block shx3_cpu_notifier = { + .notifier_call = shx3_cpu_callback, + }; + +diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c +index 6777177..cb5e44f 100644 +--- a/arch/sh/mm/mmap.c ++++ b/arch/sh/mm/mmap.c +@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int do_colour_align; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + if (flags & MAP_FIXED) { +@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + if (filp || (flags & MAP_SHARED)) + do_colour_align = 1; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_colour_align) + addr = COLOUR_ALIGN(addr, pgoff); +@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + + info.flags = 0; + info.length = len; +- info.low_limit = TASK_UNMAPPED_BASE; ++ info.low_limit = mm->mmap_base; + info.high_limit = TASK_SIZE; + info.align_mask = do_colour_align ? 
(PAGE_MASK & shm_align_mask) : 0; + info.align_offset = pgoff << PAGE_SHIFT; +@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; + int do_colour_align; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + if (flags & MAP_FIXED) { +@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (filp || (flags & MAP_SHARED)) + do_colour_align = 1; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + if (do_colour_align) +@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + +@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += mm->delta_mmap; ++#endif ++ + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } +diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h +index be56a24..443328f 100644 +--- a/arch/sparc/include/asm/atomic_64.h ++++ b/arch/sparc/include/asm/atomic_64.h +@@ -14,18 +14,40 @@ + #define ATOMIC64_INIT(i) { (i) } + + #define atomic_read(v) (*(volatile int *)&(v)->counter) ++static inline int atomic_read_unchecked(const atomic_unchecked_t *v) ++{ ++ return v->counter; ++} + #define atomic64_read(v) (*(volatile long *)&(v)->counter) ++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) ++{ ++ return v->counter; ++} + + #define atomic_set(v, i) (((v)->counter) = i) ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) ++{ ++ v->counter = i; ++} + #define atomic64_set(v, i) (((v)->counter) = i) ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) ++{ ++ v->counter = i; ++} + + extern void atomic_add(int, atomic_t *); ++extern void atomic_add_unchecked(int, atomic_unchecked_t *); + extern void atomic64_add(long, atomic64_t *); ++extern void atomic64_add_unchecked(long, atomic64_unchecked_t *); + extern void atomic_sub(int, atomic_t *); ++extern void atomic_sub_unchecked(int, atomic_unchecked_t *); + extern void atomic64_sub(long, atomic64_t *); ++extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *); + + extern int atomic_add_ret(int, atomic_t *); ++extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *); + extern long atomic64_add_ret(long, atomic64_t *); ++extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *); + extern int atomic_sub_ret(int, atomic_t *); + extern long atomic64_sub_ret(long, atomic64_t *); + +@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *); + #define atomic64_dec_return(v) atomic64_sub_ret(1, v) + + #define atomic_inc_return(v) atomic_add_ret(1, v) ++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_ret_unchecked(1, v); ++} + #define atomic64_inc_return(v) atomic64_add_ret(1, v) ++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) ++{ ++ return 
atomic64_add_ret_unchecked(1, v); ++} + + #define atomic_sub_return(i, v) atomic_sub_ret(i, v) + #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v) + + #define atomic_add_return(i, v) atomic_add_ret(i, v) ++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) ++{ ++ return atomic_add_ret_unchecked(i, v); ++} + #define atomic64_add_return(i, v) atomic64_add_ret(i, v) ++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) ++{ ++ return atomic64_add_ret_unchecked(i, v); ++} + + /* + * atomic_inc_and_test - increment and test +@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *); + * other cases. + */ + #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) ++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_inc_return_unchecked(v) == 0; ++} + #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) + + #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0) +@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *); + #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0) + + #define atomic_inc(v) atomic_add(1, v) ++static inline void atomic_inc_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_add_unchecked(1, v); ++} + #define atomic64_inc(v) atomic64_add(1, v) ++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) ++{ ++ atomic64_add_unchecked(1, v); ++} + + #define atomic_dec(v) atomic_sub(1, v) ++static inline void atomic_dec_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_sub_unchecked(1, v); ++} + #define atomic64_dec(v) atomic64_sub(1, v) ++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) ++{ ++ atomic64_sub_unchecked(1, v); ++} + + #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) + #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) + + #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) ++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) ++{ ++ return cmpxchg(&v->counter, old, new); ++} + #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) ++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) ++{ ++ return xchg(&v->counter, new); ++} + + static inline int __atomic_add_unless(atomic_t *v, int a, int u) + { +- int c, old; ++ int c, old, new; + c = atomic_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("addcc %2, %0, %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "tvs %%icc, 6\n" ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a) ++ : "cc"); ++ ++ old = atomic_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; +@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) + #define atomic64_cmpxchg(v, o, n) \ + ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) + #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) ++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) ++{ ++ return xchg(&v->counter, new); ++} + + static inline long atomic64_add_unless(atomic64_t *v, long a, long u) + { +- long c, old; ++ long c, old, new; + c = atomic64_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic64_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("addcc %2, %0, %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "tvs %%xcc, 6\n" ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a) ++ : "cc"); ++ ++ old = 
atomic64_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; + } +- return c != (u); ++ return c != u; + } + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h +index 5bb6991..5c2132e 100644 +--- a/arch/sparc/include/asm/cache.h ++++ b/arch/sparc/include/asm/cache.h +@@ -7,10 +7,12 @@ + #ifndef _SPARC_CACHE_H + #define _SPARC_CACHE_H + ++#include <linux/const.h> ++ + #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) + + #define L1_CACHE_SHIFT 5 +-#define L1_CACHE_BYTES 32 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #ifdef CONFIG_SPARC32 + #define SMP_CACHE_BYTES_SHIFT 5 +diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h +index a24e41f..47677ff 100644 +--- a/arch/sparc/include/asm/elf_32.h ++++ b/arch/sparc/include/asm/elf_32.h +@@ -114,6 +114,13 @@ typedef struct { + + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x10000UL ++ ++#define PAX_DELTA_MMAP_LEN 16 ++#define PAX_DELTA_STACK_LEN 16 ++#endif ++ + /* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. This can NOT be done in userspace + on Sparc. */ +diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h +index 370ca1e..d4f4a98 100644 +--- a/arch/sparc/include/asm/elf_64.h ++++ b/arch/sparc/include/asm/elf_64.h +@@ -189,6 +189,13 @@ typedef struct { + #define ELF_ET_DYN_BASE 0x0000010000000000UL + #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL) ++ ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28) ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 
15 : 29) ++#endif ++ + extern unsigned long sparc64_elf_hwcap; + #define ELF_HWCAP sparc64_elf_hwcap + +diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h +index 9b1c36d..209298b 100644 +--- a/arch/sparc/include/asm/pgalloc_32.h ++++ b/arch/sparc/include/asm/pgalloc_32.h +@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp) + } + + #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD) ++#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD)) + + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, + unsigned long address) +diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h +index bcfe063..b333142 100644 +--- a/arch/sparc/include/asm/pgalloc_64.h ++++ b/arch/sparc/include/asm/pgalloc_64.h +@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) + } + + #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) ++#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD)) + + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) + { +diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h +index 6fc1348..390c50a 100644 +--- a/arch/sparc/include/asm/pgtable_32.h ++++ b/arch/sparc/include/asm/pgtable_32.h +@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void); + #define PAGE_SHARED SRMMU_PAGE_SHARED + #define PAGE_COPY SRMMU_PAGE_COPY + #define PAGE_READONLY SRMMU_PAGE_RDONLY ++#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC ++#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC ++#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC + #define PAGE_KERNEL SRMMU_PAGE_KERNEL + + /* Top-level page directory - dummy used by init-mm. +@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd; + + /* xwr */ + #define __P000 PAGE_NONE +-#define __P001 PAGE_READONLY +-#define __P010 PAGE_COPY +-#define __P011 PAGE_COPY ++#define __P001 PAGE_READONLY_NOEXEC ++#define __P010 PAGE_COPY_NOEXEC ++#define __P011 PAGE_COPY_NOEXEC + #define __P100 PAGE_READONLY + #define __P101 PAGE_READONLY + #define __P110 PAGE_COPY + #define __P111 PAGE_COPY + + #define __S000 PAGE_NONE +-#define __S001 PAGE_READONLY +-#define __S010 PAGE_SHARED +-#define __S011 PAGE_SHARED ++#define __S001 PAGE_READONLY_NOEXEC ++#define __S010 PAGE_SHARED_NOEXEC ++#define __S011 PAGE_SHARED_NOEXEC + #define __S100 PAGE_READONLY + #define __S101 PAGE_READONLY + #define __S110 PAGE_SHARED +diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h +index 79da178..c2eede8 100644 +--- a/arch/sparc/include/asm/pgtsrmmu.h ++++ b/arch/sparc/include/asm/pgtsrmmu.h +@@ -115,6 +115,11 @@ + SRMMU_EXEC | SRMMU_REF) + #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \ + SRMMU_EXEC | SRMMU_REF) ++ ++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF) ++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) ++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) ++ + #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \ + SRMMU_DIRTY | SRMMU_REF) + +diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h +index 9689176..63c18ea 100644 +--- a/arch/sparc/include/asm/spinlock_64.h ++++ b/arch/sparc/include/asm/spinlock_64.h +@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla + + /* Multi-reader locks, these are much 
saner than the 32-bit Sparc ones... */ + +-static void inline arch_read_lock(arch_rwlock_t *lock) ++static inline void arch_read_lock(arch_rwlock_t *lock) + { + unsigned long tmp1, tmp2; + + __asm__ __volatile__ ( + "1: ldsw [%2], %0\n" + " brlz,pn %0, 2f\n" +-"4: add %0, 1, %1\n" ++"4: addcc %0, 1, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" tvs %%icc, 6\n" ++#endif ++ + " cas [%2], %0, %1\n" + " cmp %0, %1\n" + " bne,pn %%icc, 1b\n" +@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock) + " .previous" + : "=&r" (tmp1), "=&r" (tmp2) + : "r" (lock) +- : "memory"); ++ : "memory", "cc"); + } + +-static int inline arch_read_trylock(arch_rwlock_t *lock) ++static inline int arch_read_trylock(arch_rwlock_t *lock) + { + int tmp1, tmp2; + +@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock) + "1: ldsw [%2], %0\n" + " brlz,a,pn %0, 2f\n" + " mov 0, %0\n" +-" add %0, 1, %1\n" ++" addcc %0, 1, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" tvs %%icc, 6\n" ++#endif ++ + " cas [%2], %0, %1\n" + " cmp %0, %1\n" + " bne,pn %%icc, 1b\n" +@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock) + return tmp1; + } + +-static void inline arch_read_unlock(arch_rwlock_t *lock) ++static inline void arch_read_unlock(arch_rwlock_t *lock) + { + unsigned long tmp1, tmp2; + + __asm__ __volatile__( + "1: lduw [%2], %0\n" +-" sub %0, 1, %1\n" ++" subcc %0, 1, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" tvs %%icc, 6\n" ++#endif ++ + " cas [%2], %0, %1\n" + " cmp %0, %1\n" + " bne,pn %%xcc, 1b\n" +@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock) + : "memory"); + } + +-static void inline arch_write_lock(arch_rwlock_t *lock) ++static inline void arch_write_lock(arch_rwlock_t *lock) + { + unsigned long mask, tmp1, tmp2; + +@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock) + : "memory"); + } + +-static void inline arch_write_unlock(arch_rwlock_t *lock) ++static inline void arch_write_unlock(arch_rwlock_t *lock) + { + __asm__ __volatile__( + " stw %%g0, [%0]" +@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock) + : "memory"); + } + +-static int inline arch_write_trylock(arch_rwlock_t *lock) ++static inline int arch_write_trylock(arch_rwlock_t *lock) + { + unsigned long mask, tmp1, tmp2, result; + +diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h +index 25849ae..924c54b 100644 +--- a/arch/sparc/include/asm/thread_info_32.h ++++ b/arch/sparc/include/asm/thread_info_32.h +@@ -49,6 +49,8 @@ struct thread_info { + unsigned long w_saved; + + struct restart_block restart_block; ++ ++ unsigned long lowest_stack; + }; + + /* +diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h +index 269bd92..e46a9b8 100644 +--- a/arch/sparc/include/asm/thread_info_64.h ++++ b/arch/sparc/include/asm/thread_info_64.h +@@ -63,6 +63,8 @@ struct thread_info { + struct pt_regs *kern_una_regs; + unsigned int kern_una_insn; + ++ unsigned long lowest_stack; ++ + unsigned long fpregs[0] __attribute__ ((aligned(64))); + }; + +@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6"); + #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */ + /* flag bit 6 is available */ + #define TIF_32BIT 7 /* 32-bit binary */ +-/* flag bit 8 is available */ ++#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */ + #define TIF_SECCOMP 9 /* secure computing */ + #define TIF_SYSCALL_AUDIT 10 /* 
syscall auditing active */ + #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ ++ + /* NOTE: Thread flags >= 12 should be ones we have no interest + * in using in assembly, else we can't use the mask as + * an immediate value in instructions such as andcc. +@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6"); + #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) + #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) + #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID) + + #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \ + _TIF_DO_NOTIFY_RESUME_MASK | \ + _TIF_NEED_RESCHED) + #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING) + ++#define _TIF_WORK_SYSCALL \ ++ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \ ++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID) ++ ++ + /* + * Thread-synchronous status. + * +diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h +index 0167d26..767bb0c 100644 +--- a/arch/sparc/include/asm/uaccess.h ++++ b/arch/sparc/include/asm/uaccess.h +@@ -1,5 +1,6 @@ + #ifndef ___ASM_SPARC_UACCESS_H + #define ___ASM_SPARC_UACCESS_H ++ + #if defined(__sparc__) && defined(__arch64__) + #include <asm/uaccess_64.h> + #else +diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h +index 53a28dd..50c38c3 100644 +--- a/arch/sparc/include/asm/uaccess_32.h ++++ b/arch/sparc/include/asm/uaccess_32.h +@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig + + static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) + { +- if (n && __access_ok((unsigned long) to, n)) ++ if ((long)n < 0) ++ return n; ++ ++ if (n && __access_ok((unsigned long) to, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); + return __copy_user(to, (__force void __user *) from, n); +- else ++ } else + return n; + } + + static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ + return __copy_user(to, (__force void __user *) from, n); + } + + static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) + { +- if (n && __access_ok((unsigned long) from, n)) ++ if ((long)n < 0) ++ return n; ++ ++ if (n && __access_ok((unsigned long) from, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); + return __copy_user((__force void __user *) to, from, n); +- else ++ } else + return n; + } + + static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + return __copy_user((__force void __user *) to, from, n); + } + +diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h +index e562d3c..191f176 100644 +--- a/arch/sparc/include/asm/uaccess_64.h ++++ b/arch/sparc/include/asm/uaccess_64.h +@@ -10,6 +10,7 @@ + #include <linux/compiler.h> + #include <linux/string.h> + #include <linux/thread_info.h> ++#include <linux/kernel.h> + #include <asm/asi.h> + #include <asm/spitfire.h> + #include <asm-generic/uaccess-unaligned.h> +@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from, + static inline unsigned long __must_check + copy_from_user(void *to, const void 
__user *from, unsigned long size) + { +- unsigned long ret = ___copy_from_user(to, from, size); ++ unsigned long ret; + ++ if ((long)size < 0 || size > INT_MAX) ++ return size; ++ ++ if (!__builtin_constant_p(size)) ++ check_object_size(to, size, false); ++ ++ ret = ___copy_from_user(to, from, size); + if (unlikely(ret)) + ret = copy_from_user_fixup(to, from, size); + +@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from, + static inline unsigned long __must_check + copy_to_user(void __user *to, const void *from, unsigned long size) + { +- unsigned long ret = ___copy_to_user(to, from, size); ++ unsigned long ret; ++ ++ if ((long)size < 0 || size > INT_MAX) ++ return size; ++ ++ if (!__builtin_constant_p(size)) ++ check_object_size(from, size, true); + ++ ret = ___copy_to_user(to, from, size); + if (unlikely(ret)) + ret = copy_to_user_fixup(to, from, size); + return ret; +diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile +index 6cf591b..b49e65a 100644 +--- a/arch/sparc/kernel/Makefile ++++ b/arch/sparc/kernel/Makefile +@@ -3,7 +3,7 @@ + # + + asflags-y := -ansi +-ccflags-y := -Werror ++#ccflags-y := -Werror + + extra-y := head_$(BITS).o + +diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c +index 62eede1..9c5b904 100644 +--- a/arch/sparc/kernel/process_32.c ++++ b/arch/sparc/kernel/process_32.c +@@ -125,14 +125,14 @@ void show_regs(struct pt_regs *r) + + printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", + r->psr, r->pc, r->npc, r->y, print_tainted()); +- printk("PC: <%pS>\n", (void *) r->pc); ++ printk("PC: <%pA>\n", (void *) r->pc); + printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3], + r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]); + printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11], + r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]); +- printk("RPC: <%pS>\n", (void *) r->u_regs[15]); ++ printk("RPC: <%pA>\n", (void *) r->u_regs[15]); + + printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3], +@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) + rw = (struct reg_window32 *) fp; + pc = rw->ins[7]; + printk("[%08lx : ", pc); +- printk("%pS ] ", (void *) pc); ++ printk("%pA ] ", (void *) pc); + fp = rw->ins[6]; + } while (++count < 16); + printk("\n"); +diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c +index cdb80b2..5ca141d 100644 +--- a/arch/sparc/kernel/process_64.c ++++ b/arch/sparc/kernel/process_64.c +@@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs) + printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n", + rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]); + if (regs->tstate & TSTATE_PRIV) +- printk("I7: <%pS>\n", (void *) rwk->ins[7]); ++ printk("I7: <%pA>\n", (void *) rwk->ins[7]); + } + + void show_regs(struct pt_regs *regs) + { + printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate, + regs->tpc, regs->tnpc, regs->y, print_tainted()); +- printk("TPC: <%pS>\n", (void *) regs->tpc); ++ printk("TPC: <%pA>\n", (void *) regs->tpc); + printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n", + regs->u_regs[0], regs->u_regs[1], regs->u_regs[2], + regs->u_regs[3]); +@@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs) + printk("o4: %016lx o5: %016lx sp: 
%016lx ret_pc: %016lx\n", + regs->u_regs[12], regs->u_regs[13], regs->u_regs[14], + regs->u_regs[15]); +- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]); ++ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]); + show_regwindow(regs); + show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); + } +@@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void) + ((tp && tp->task) ? tp->task->pid : -1)); + + if (gp->tstate & TSTATE_PRIV) { +- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n", ++ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n", + (void *) gp->tpc, + (void *) gp->o7, + (void *) gp->i7, +diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c +index 9f20566..67eb41b 100644 +--- a/arch/sparc/kernel/prom_common.c ++++ b/arch/sparc/kernel/prom_common.c +@@ -143,7 +143,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf) + + unsigned int prom_early_allocated __initdata; + +-static struct of_pdt_ops prom_sparc_ops __initdata = { ++static struct of_pdt_ops prom_sparc_ops __initconst = { + .nextprop = prom_common_nextprop, + .getproplen = prom_getproplen, + .getproperty = prom_getproperty, +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c +index 7ff45e4..a58f271 100644 +--- a/arch/sparc/kernel/ptrace_64.c ++++ b/arch/sparc/kernel/ptrace_64.c +@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request, + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + asmlinkage int syscall_trace_enter(struct pt_regs *regs) + { + int ret = 0; +@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) + /* do the secure computing check first */ + secure_computing_strict(regs->u_regs[UREG_G1]); + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ret = tracehook_report_syscall_entry(regs); + +@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) + + asmlinkage void syscall_trace_leave(struct pt_regs *regs) + { ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + audit_syscall_exit(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) +diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c +index 3a8d184..49498a8 100644 +--- a/arch/sparc/kernel/sys_sparc_32.c ++++ b/arch/sparc/kernel/sys_sparc_32.c +@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + if (len > TASK_SIZE - PAGE_SIZE) + return -ENOMEM; + if (!addr) +- addr = TASK_UNMAPPED_BASE; ++ addr = current->mm->mmap_base; + + info.flags = 0; + info.length = len; +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c +index 708bc29..6bfdfad 100644 +--- a/arch/sparc/kernel/sys_sparc_64.c ++++ b/arch/sparc/kernel/sys_sparc_64.c +@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + struct vm_area_struct * vma; + unsigned long task_size = TASK_SIZE; + int do_color_align; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + if (flags & MAP_FIXED) { + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. 
+ */ +- if ((flags & MAP_SHARED) && ++ if ((filp || (flags & MAP_SHARED)) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; +@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_color_align) + addr = COLOR_ALIGN(addr, pgoff); +@@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + + info.flags = 0; + info.length = len; +- info.low_limit = TASK_UNMAPPED_BASE; ++ info.low_limit = mm->mmap_base; + info.high_limit = min(task_size, VA_EXCLUDE_START); + info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + + if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { + VM_BUG_ON(addr != -ENOMEM); + info.low_limit = VA_EXCLUDE_END; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += mm->delta_mmap; ++#endif ++ + info.high_limit = task_size; + addr = vm_unmapped_area(&info); + } +@@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + unsigned long task_size = STACK_TOP32; + unsigned long addr = addr0; + int do_color_align; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + /* This should only ever run for 32-bit processes. */ +@@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ +- if ((flags & MAP_SHARED) && ++ if ((filp || (flags & MAP_SHARED)) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; +@@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + if (do_color_align) +@@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + +@@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + info.high_limit = mm->mmap_base; + info.align_mask = do_color_align ? 
(PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + + /* +@@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += mm->delta_mmap; ++#endif ++ + info.high_limit = STACK_TOP32; + addr = vm_unmapped_area(&info); + } +@@ -264,6 +286,10 @@ static unsigned long mmap_rnd(void) + { + unsigned long rnd = 0UL; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (current->flags & PF_RANDOMIZE) { + unsigned long val = get_random_int(); + if (test_thread_flag(TIF_32BIT)) +@@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + gap == RLIM_INFINITY || + sysctl_legacy_va_layout) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { +@@ -301,6 +333,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + gap = (task_size / 6 * 5); + + mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S +index 22a1098..6255eb9 100644 +--- a/arch/sparc/kernel/syscalls.S ++++ b/arch/sparc/kernel/syscalls.S +@@ -52,7 +52,7 @@ sys32_rt_sigreturn: + #endif + .align 32 + 1: ldx [%g6 + TI_FLAGS], %l5 +- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 ++ andcc %l5, _TIF_WORK_SYSCALL, %g0 + be,pt %icc, rtrap + nop + call syscall_trace_leave +@@ -184,7 +184,7 @@ linux_sparc_syscall32: + + srl %i5, 0, %o5 ! IEU1 + srl %i2, 0, %o2 ! IEU0 Group +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0 + bne,pn %icc, linux_syscall_trace32 ! CTI + mov %i0, %l5 ! IEU1 + call %l7 ! CTI Group brk forced +@@ -207,7 +207,7 @@ linux_sparc_syscall: + + mov %i3, %o3 ! IEU1 + mov %i4, %o4 ! IEU0 Group +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0 + bne,pn %icc, linux_syscall_trace ! CTI Group + mov %i0, %l5 ! IEU0 + 2: call %l7 ! CTI Group brk forced +@@ -223,7 +223,7 @@ ret_sys_call: + + cmp %o0, -ERESTART_RESTARTBLOCK + bgeu,pn %xcc, 1f +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0 + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! 
pc = npc + + 2: +diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c +index 654e8aa..45f431b 100644 +--- a/arch/sparc/kernel/sysfs.c ++++ b/arch/sparc/kernel/sysfs.c +@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, + return NOTIFY_OK; + } + +-static struct notifier_block __cpuinitdata sysfs_cpu_nb = { ++static struct notifier_block sysfs_cpu_nb = { + .notifier_call = sysfs_cpu_notify, + }; + +diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c +index 6629829..036032d 100644 +--- a/arch/sparc/kernel/traps_32.c ++++ b/arch/sparc/kernel/traps_32.c +@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc) + #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t") + #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t") + ++extern void gr_handle_kernel_exploit(void); ++ + void die_if_kernel(char *str, struct pt_regs *regs) + { + static int die_counter; +@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs) + count++ < 30 && + (((unsigned long) rw) >= PAGE_OFFSET) && + !(((unsigned long) rw) & 0x7)) { +- printk("Caller[%08lx]: %pS\n", rw->ins[7], ++ printk("Caller[%08lx]: %pA\n", rw->ins[7], + (void *) rw->ins[7]); + rw = (struct reg_window32 *)rw->ins[6]; + } + } + printk("Instruction DUMP:"); + instruction_dump ((unsigned long *) regs->pc); +- if(regs->psr & PSR_PS) ++ if(regs->psr & PSR_PS) { ++ gr_handle_kernel_exploit(); + do_exit(SIGKILL); ++ } + do_exit(SIGSEGV); + } + +diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c +index 8d38ca9..845b1d6 100644 +--- a/arch/sparc/kernel/traps_64.c ++++ b/arch/sparc/kernel/traps_64.c +@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p) + i + 1, + p->trapstack[i].tstate, p->trapstack[i].tpc, + p->trapstack[i].tnpc, p->trapstack[i].tt); +- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc); ++ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc); + } + } + +@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl) + + lvl -= 0x100; + if (regs->tstate & TSTATE_PRIV) { ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ if (lvl == 6) ++ pax_report_refcount_overflow(regs); ++#endif ++ + sprintf(buffer, "Kernel bad sw trap %lx", lvl); + die_if_kernel(buffer, regs); + } +@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl) + void bad_trap_tl1(struct pt_regs *regs, long lvl) + { + char buffer[32]; +- ++ + if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, + 0, lvl, SIGTRAP) == NOTIFY_STOP) + return; + ++#ifdef CONFIG_PAX_REFCOUNT ++ if (lvl == 6) ++ pax_report_refcount_overflow(regs); ++#endif ++ + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + + sprintf (buffer, "Bad trap %lx at tl>0", lvl); +@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in + regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate); + printk("%s" "ERROR(%d): ", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id()); +- printk("TPC<%pS>\n", (void *) regs->tpc); ++ printk("TPC<%pA>\n", (void *) regs->tpc); + printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), + (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT, +@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) + smp_processor_id(), + (type & 0x1) ? 
'I' : 'D', + regs->tpc); +- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc); ++ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc); + panic("Irrecoverable Cheetah+ parity error."); + } + +@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) + smp_processor_id(), + (type & 0x1) ? 'I' : 'D', + regs->tpc); +- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc); ++ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc); + } + + struct sun4v_error_entry { +@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl) + + printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", + regs->tpc, tl); +- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc); ++ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc); + printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]); +- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n", ++ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n", + (void *) regs->u_regs[UREG_I7]); + printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " + "pte[%lx] error[%lx]\n", +@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) + + printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", + regs->tpc, tl); +- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc); ++ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc); + printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]); +- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n", ++ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n", + (void *) regs->u_regs[UREG_I7]); + printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " + "pte[%lx] error[%lx]\n", +@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) + fp = (unsigned long)sf->fp + STACK_BIAS; + } + +- printk(" [%016lx] %pS\n", pc, (void *) pc); ++ printk(" [%016lx] %pA\n", pc, (void *) pc); + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + if ((pc + 8UL) == (unsigned long) &return_to_handler) { + int index = tsk->curr_ret_stack; + if (tsk->ret_stack && index >= graph) { + pc = tsk->ret_stack[index - graph].ret; +- printk(" [%016lx] %pS\n", pc, (void *) pc); ++ printk(" [%016lx] %pA\n", pc, (void *) pc); + graph++; + } + } +@@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw) + return (struct reg_window *) (fp + STACK_BIAS); + } + ++extern void gr_handle_kernel_exploit(void); ++ + void die_if_kernel(char *str, struct pt_regs *regs) + { + static int die_counter; +@@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) + while (rw && + count++ < 30 && + kstack_valid(tp, (unsigned long) rw)) { +- printk("Caller[%016lx]: %pS\n", rw->ins[7], ++ printk("Caller[%016lx]: %pA\n", rw->ins[7], + (void *) rw->ins[7]); + + rw = kernel_stack_up(rw); +@@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs) + } + user_instruction_dump ((unsigned int __user *) regs->tpc); + } +- if (regs->tstate & TSTATE_PRIV) ++ if (regs->tstate & TSTATE_PRIV) { ++ gr_handle_kernel_exploit(); + do_exit(SIGKILL); ++ } + do_exit(SIGSEGV); + } + EXPORT_SYMBOL(die_if_kernel); +diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c +index 8201c25e..072a2a7 100644 +--- a/arch/sparc/kernel/unaligned_64.c ++++ b/arch/sparc/kernel/unaligned_64.c +@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs) + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); + + if (__ratelimit(&ratelimit)) { +- printk("Kernel unaligned access at TPC[%lx] %pS\n", ++ printk("Kernel unaligned 
access at TPC[%lx] %pA\n", + regs->tpc, (void *) regs->tpc); + } + } +diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c +index eb1624b..55100de 100644 +--- a/arch/sparc/kernel/us3_cpufreq.c ++++ b/arch/sparc/kernel/us3_cpufreq.c +@@ -18,14 +18,12 @@ + #include <asm/head.h> + #include <asm/timer.h> + +-static struct cpufreq_driver *cpufreq_us3_driver; +- + struct us3_freq_percpu_info { + struct cpufreq_frequency_table table[4]; + }; + + /* Indexed by cpu number. */ +-static struct us3_freq_percpu_info *us3_freq_table; ++static struct us3_freq_percpu_info us3_freq_table[NR_CPUS]; + + /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled + * in the Safari config register. +@@ -191,12 +189,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) + + static int us3_freq_cpu_exit(struct cpufreq_policy *policy) + { +- if (cpufreq_us3_driver) +- us3_set_cpu_divider_index(policy->cpu, 0); ++ us3_set_cpu_divider_index(policy->cpu, 0); + + return 0; + } + ++static int __init us3_freq_init(void); ++static void __exit us3_freq_exit(void); ++ ++static struct cpufreq_driver cpufreq_us3_driver = { ++ .init = us3_freq_cpu_init, ++ .verify = us3_freq_verify, ++ .target = us3_freq_target, ++ .get = us3_freq_get, ++ .exit = us3_freq_cpu_exit, ++ .owner = THIS_MODULE, ++ .name = "UltraSPARC-III", ++ ++}; ++ + static int __init us3_freq_init(void) + { + unsigned long manuf, impl, ver; +@@ -213,57 +224,15 @@ static int __init us3_freq_init(void) + (impl == CHEETAH_IMPL || + impl == CHEETAH_PLUS_IMPL || + impl == JAGUAR_IMPL || +- impl == PANTHER_IMPL)) { +- struct cpufreq_driver *driver; +- +- ret = -ENOMEM; +- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); +- if (!driver) +- goto err_out; +- +- us3_freq_table = kzalloc( +- (NR_CPUS * sizeof(struct us3_freq_percpu_info)), +- GFP_KERNEL); +- if (!us3_freq_table) +- goto err_out; +- +- driver->init = us3_freq_cpu_init; +- driver->verify = us3_freq_verify; +- driver->target = us3_freq_target; +- driver->get = us3_freq_get; +- driver->exit = us3_freq_cpu_exit; +- driver->owner = THIS_MODULE, +- strcpy(driver->name, "UltraSPARC-III"); +- +- cpufreq_us3_driver = driver; +- ret = cpufreq_register_driver(driver); +- if (ret) +- goto err_out; +- +- return 0; +- +-err_out: +- if (driver) { +- kfree(driver); +- cpufreq_us3_driver = NULL; +- } +- kfree(us3_freq_table); +- us3_freq_table = NULL; +- return ret; +- } ++ impl == PANTHER_IMPL)) ++ return cpufreq_register_driver(&cpufreq_us3_driver); + + return -ENODEV; + } + + static void __exit us3_freq_exit(void) + { +- if (cpufreq_us3_driver) { +- cpufreq_unregister_driver(cpufreq_us3_driver); +- kfree(cpufreq_us3_driver); +- cpufreq_us3_driver = NULL; +- kfree(us3_freq_table); +- us3_freq_table = NULL; +- } ++ cpufreq_unregister_driver(&cpufreq_us3_driver); + } + + MODULE_AUTHOR("David S. 
Miller <davem@redhat.com>"); +diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile +index 8410065f2..4fd4ca22 100644 +--- a/arch/sparc/lib/Makefile ++++ b/arch/sparc/lib/Makefile +@@ -2,7 +2,7 @@ + # + + asflags-y := -ansi -DST_DIV0=0x02 +-ccflags-y := -Werror ++#ccflags-y := -Werror + + lib-$(CONFIG_SPARC32) += ashrdi3.o + lib-$(CONFIG_SPARC32) += memcpy.o memset.o +diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S +index 85c233d..68500e0 100644 +--- a/arch/sparc/lib/atomic_64.S ++++ b/arch/sparc/lib/atomic_64.S +@@ -17,7 +17,12 @@ + ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, BACKOFF_LABEL(2f, 1b) +@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + ENDPROC(atomic_add) + ++ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: lduw [%o1], %g1 ++ add %g1, %o0, %g7 ++ cas [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %icc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ENDPROC(atomic_add_unchecked) ++ + ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, BACKOFF_LABEL(2f, 1b) +@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + ENDPROC(atomic_sub) + ++ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: lduw [%o1], %g1 ++ sub %g1, %o0, %g7 ++ cas [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %icc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ENDPROC(atomic_sub_unchecked) ++ + ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, BACKOFF_LABEL(2f, 1b) +@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + ENDPROC(atomic_add_ret) + ++ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: lduw [%o1], %g1 ++ addcc %g1, %o0, %g7 ++ cas [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %icc, 2f ++ add %g7, %o0, %g7 ++ sra %g7, 0, %o0 ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ENDPROC(atomic_add_ret_unchecked) ++ + ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, BACKOFF_LABEL(2f, 1b) +@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret) + ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) +@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + ENDPROC(atomic64_add) + ++ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) 
++1: ldx [%o1], %g1 ++ addcc %g1, %o0, %g7 ++ casx [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %xcc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ENDPROC(atomic64_add_unchecked) ++ + ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) +@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + ENDPROC(atomic64_sub) + ++ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: ldx [%o1], %g1 ++ subcc %g1, %o0, %g7 ++ casx [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %xcc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ENDPROC(atomic64_sub_unchecked) ++ + ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) +@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + ENDPROC(atomic64_add_ret) + ++ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: ldx [%o1], %g1 ++ addcc %g1, %o0, %g7 ++ casx [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %xcc, 2f ++ add %g7, %o0, %g7 ++ mov %g7, %o0 ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ENDPROC(atomic64_add_ret_unchecked) ++ + ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) +diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c +index 0c4e35e..745d3e4 100644 +--- a/arch/sparc/lib/ksyms.c ++++ b/arch/sparc/lib/ksyms.c +@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write); + + /* Atomic counter implementation. 
*/ + EXPORT_SYMBOL(atomic_add); ++EXPORT_SYMBOL(atomic_add_unchecked); + EXPORT_SYMBOL(atomic_add_ret); ++EXPORT_SYMBOL(atomic_add_ret_unchecked); + EXPORT_SYMBOL(atomic_sub); ++EXPORT_SYMBOL(atomic_sub_unchecked); + EXPORT_SYMBOL(atomic_sub_ret); + EXPORT_SYMBOL(atomic64_add); ++EXPORT_SYMBOL(atomic64_add_unchecked); + EXPORT_SYMBOL(atomic64_add_ret); ++EXPORT_SYMBOL(atomic64_add_ret_unchecked); + EXPORT_SYMBOL(atomic64_sub); ++EXPORT_SYMBOL(atomic64_sub_unchecked); + EXPORT_SYMBOL(atomic64_sub_ret); + EXPORT_SYMBOL(atomic64_dec_if_positive); + +diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile +index 30c3ecc..736f015 100644 +--- a/arch/sparc/mm/Makefile ++++ b/arch/sparc/mm/Makefile +@@ -2,7 +2,7 @@ + # + + asflags-y := -ansi +-ccflags-y := -Werror ++#ccflags-y := -Werror + + obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o + obj-y += fault_$(BITS).o +diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c +index e98bfda..ea8d221 100644 +--- a/arch/sparc/mm/fault_32.c ++++ b/arch/sparc/mm/fault_32.c +@@ -21,6 +21,9 @@ + #include <linux/perf_event.h> + #include <linux/interrupt.h> + #include <linux/kdebug.h> ++#include <linux/slab.h> ++#include <linux/pagemap.h> ++#include <linux/compiler.h> + + #include <asm/page.h> + #include <asm/pgtable.h> +@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault) + return safe_compute_effective_address(regs, insn); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++#ifdef CONFIG_PAX_DLRESOLVE ++static void pax_emuplt_close(struct vm_area_struct *vma) ++{ ++ vma->vm_mm->call_dl_resolve = 0UL; ++} ++ ++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ unsigned int *kaddr; ++ ++ vmf->page = alloc_page(GFP_HIGHUSER); ++ if (!vmf->page) ++ return VM_FAULT_OOM; ++ ++ kaddr = kmap(vmf->page); ++ memset(kaddr, 0, PAGE_SIZE); ++ kaddr[0] = 0x9DE3BFA8U; /* save */ ++ flush_dcache_page(vmf->page); ++ kunmap(vmf->page); ++ return VM_FAULT_MAJOR; ++} ++ ++static const struct vm_operations_struct pax_vm_ops = { ++ .close = pax_emuplt_close, ++ .fault = pax_emuplt_fault ++}; ++ ++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) ++{ ++ int ret; ++ ++ INIT_LIST_HEAD(&vma->anon_vma_chain); ++ vma->vm_mm = current->mm; ++ vma->vm_start = addr; ++ vma->vm_end = addr + PAGE_SIZE; ++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ vma->vm_ops = &pax_vm_ops; ++ ++ ret = insert_vm_struct(current->mm, vma); ++ if (ret) ++ return ret; ++ ++ ++current->mm->total_vm; ++ return 0; ++} ++#endif ++ ++/* ++ * PaX: decide what to do with offenders (regs->pc = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when patched PLT trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: patched PLT emulation #1 */ ++ unsigned int sethi1, sethi2, jmpl; ++ ++ err = get_user(sethi1, (unsigned int *)regs->pc); ++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4)); ++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x03000000U && ++ (jmpl & 0xFFFFE000U) == 0x81C06000U) ++ { ++ unsigned int addr; ++ ++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; ++ addr = regs->u_regs[UREG_G1]; ++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 
0x00001000U); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #2 */ ++ unsigned int ba; ++ ++ err = get_user(ba, (unsigned int *)regs->pc); ++ ++ if (err) ++ break; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) { ++ unsigned int addr; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U) ++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); ++ else ++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #3 */ ++ unsigned int sethi, bajmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->pc); ++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) && ++ nop == 0x01000000U) ++ { ++ unsigned int addr; ++ ++ addr = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] = addr; ++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U) ++ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); ++ else ++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation step 1 */ ++ unsigned int sethi, ba, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->pc); ++ err |= get_user(ba, (unsigned int *)(regs->pc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && ++ nop == 0x01000000U) ++ { ++ unsigned int addr, save, call; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U) ++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); ++ else ++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); ++ ++ err = get_user(save, (unsigned int *)addr); ++ err |= get_user(call, (unsigned int *)(addr+4)); ++ err |= get_user(nop, (unsigned int *)(addr+8)); ++ if (err) ++ break; ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ struct vm_area_struct *vma; ++ unsigned long call_dl_resolve; ++ ++ down_read(¤t->mm->mmap_sem); ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_read(¤t->mm->mmap_sem); ++ if (likely(call_dl_resolve)) ++ goto emulate; ++ ++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ ++ down_write(¤t->mm->mmap_sem); ++ if (current->mm->call_dl_resolve) { ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_write(¤t->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ goto emulate; ++ } ++ ++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); ++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) { ++ up_write(¤t->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ if (pax_insert_vma(vma, call_dl_resolve)) { ++ up_write(¤t->mm->mmap_sem); ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ current->mm->call_dl_resolve = call_dl_resolve; ++ up_write(¤t->mm->mmap_sem); ++ ++emulate: ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->pc = call_dl_resolve; ++ regs->npc = addr+4; ++ 
return 3; ++ } ++#endif ++ ++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ ++ if ((save & 0xFFC00000U) == 0x05000000U && ++ (call & 0xFFFFE000U) == 0x85C0A000U && ++ nop == 0x01000000U) ++ { ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G2] = addr + 4; ++ addr = (save & 0x003FFFFFU) << 10; ++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 3; ++ } ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation step 2 */ ++ unsigned int save, call, nop; ++ ++ err = get_user(save, (unsigned int *)(regs->pc-4)); ++ err |= get_user(call, (unsigned int *)regs->pc); ++ err |= get_user(nop, (unsigned int *)(regs->pc+4)); ++ if (err) ++ break; ++ ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2); ++ ++ regs->u_regs[UREG_RETPC] = regs->pc; ++ regs->pc = dl_resolve; ++ regs->npc = dl_resolve+4; ++ return 3; ++ } ++ } while (0); ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 8; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, + int text_fault) + { +@@ -230,6 +504,24 @@ good_area: + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) { ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 2: ++ case 3: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + /* Allow reads even for write-only mappings */ + if (!(vma->vm_flags & (VM_READ | VM_EXEC))) + goto bad_area; +diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c +index 5062ff3..e0b75f3 100644 +--- a/arch/sparc/mm/fault_64.c ++++ b/arch/sparc/mm/fault_64.c +@@ -21,6 +21,9 @@ + #include <linux/kprobes.h> + #include <linux/kdebug.h> + #include <linux/percpu.h> ++#include <linux/slab.h> ++#include <linux/pagemap.h> ++#include <linux/compiler.h> + + #include <asm/page.h> + #include <asm/pgtable.h> +@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) + printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", + regs->tpc); + printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]); +- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]); ++ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]); + printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); + dump_stack(); + unhandled_fault(regs->tpc, current, regs); +@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, + show_regs(regs); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++#ifdef CONFIG_PAX_DLRESOLVE ++static void pax_emuplt_close(struct vm_area_struct *vma) ++{ ++ vma->vm_mm->call_dl_resolve = 0UL; ++} ++ ++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ unsigned int *kaddr; ++ ++ vmf->page = alloc_page(GFP_HIGHUSER); ++ if (!vmf->page) ++ return VM_FAULT_OOM; ++ ++ 
kaddr = kmap(vmf->page); ++ memset(kaddr, 0, PAGE_SIZE); ++ kaddr[0] = 0x9DE3BFA8U; /* save */ ++ flush_dcache_page(vmf->page); ++ kunmap(vmf->page); ++ return VM_FAULT_MAJOR; ++} ++ ++static const struct vm_operations_struct pax_vm_ops = { ++ .close = pax_emuplt_close, ++ .fault = pax_emuplt_fault ++}; ++ ++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) ++{ ++ int ret; ++ ++ INIT_LIST_HEAD(&vma->anon_vma_chain); ++ vma->vm_mm = current->mm; ++ vma->vm_start = addr; ++ vma->vm_end = addr + PAGE_SIZE; ++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ vma->vm_ops = &pax_vm_ops; ++ ++ ret = insert_vm_struct(current->mm, vma); ++ if (ret) ++ return ret; ++ ++ ++current->mm->total_vm; ++ return 0; ++} ++#endif ++ ++/* ++ * PaX: decide what to do with offenders (regs->tpc = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when patched PLT trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: patched PLT emulation #1 */ ++ unsigned int sethi1, sethi2, jmpl; ++ ++ err = get_user(sethi1, (unsigned int *)regs->tpc); ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x03000000U && ++ (jmpl & 0xFFFFE000U) == 0x81C06000U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; ++ addr = regs->u_regs[UREG_G1]; ++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #2 */ ++ unsigned int ba; ++ ++ err = get_user(ba, (unsigned int *)regs->tpc); ++ ++ if (err) ++ break; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) { ++ unsigned long addr; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U) ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); ++ else ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #3 */ ++ unsigned int sethi, bajmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ addr = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] = addr; ++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U) ++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); ++ else ++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #4 */ ++ unsigned int sethi, mov1, call, mov2; ++ ++ 
err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(call, (unsigned int *)(regs->tpc+8)); ++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ mov1 == 0x8210000FU && ++ (call & 0xC0000000U) == 0x40000000U && ++ mov2 == 0x9E100001U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC]; ++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #5 */ ++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); ++ err |= get_user(or1, (unsigned int *)(regs->tpc+12)); ++ err |= get_user(or2, (unsigned int *)(regs->tpc+16)); ++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20)); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+28)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x0B000000U && ++ (or1 & 0xFFFFE000U) == 0x82106000U && ++ (or2 & 0xFFFFE000U) == 0x8A116000U && ++ sllx == 0x83287020U && ++ jmpl == 0x81C04005U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); ++ regs->u_regs[UREG_G1] <<= 32; ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); ++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #6 */ ++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); ++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12)); ++ err |= get_user(or, (unsigned int *)(regs->tpc+16)); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+24)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x0B000000U && ++ sllx == 0x83287020U && ++ (or & 0xFFFFE000U) == 0x8A116000U && ++ jmpl == 0x81C04005U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] <<= 32; ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU); ++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation step 1 */ ++ unsigned int sethi, ba, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(ba, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ 
unsigned int save, call; ++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U) ++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); ++ else ++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ err = get_user(save, (unsigned int *)addr); ++ err |= get_user(call, (unsigned int *)(addr+4)); ++ err |= get_user(nop, (unsigned int *)(addr+8)); ++ if (err) ++ break; ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ struct vm_area_struct *vma; ++ unsigned long call_dl_resolve; ++ ++ down_read(¤t->mm->mmap_sem); ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_read(¤t->mm->mmap_sem); ++ if (likely(call_dl_resolve)) ++ goto emulate; ++ ++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ ++ down_write(¤t->mm->mmap_sem); ++ if (current->mm->call_dl_resolve) { ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_write(¤t->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ goto emulate; ++ } ++ ++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); ++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) { ++ up_write(¤t->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ if (pax_insert_vma(vma, call_dl_resolve)) { ++ up_write(¤t->mm->mmap_sem); ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ current->mm->call_dl_resolve = call_dl_resolve; ++ up_write(¤t->mm->mmap_sem); ++ ++emulate: ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->tpc = call_dl_resolve; ++ regs->tnpc = addr+4; ++ return 3; ++ } ++#endif ++ ++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ ++ if ((save & 0xFFC00000U) == 0x05000000U && ++ (call & 0xFFFFE000U) == 0x85C0A000U && ++ nop == 0x01000000U) ++ { ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G2] = addr + 4; ++ addr = (save & 0x003FFFFFU) << 10; ++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 3; ++ } ++ ++ /* PaX: 64-bit PLT stub */ ++ err = get_user(sethi1, (unsigned int *)addr); ++ err |= get_user(sethi2, (unsigned int *)(addr+4)); ++ err |= get_user(or1, (unsigned int *)(addr+8)); ++ err |= get_user(or2, (unsigned int *)(addr+12)); ++ err |= get_user(sllx, (unsigned int *)(addr+16)); ++ err |= get_user(add, (unsigned int *)(addr+20)); ++ err |= get_user(jmpl, (unsigned int *)(addr+24)); ++ err |= get_user(nop, (unsigned int *)(addr+28)); ++ if (err) ++ break; ++ ++ if ((sethi1 & 0xFFC00000U) == 0x09000000U && ++ (sethi2 & 0xFFC00000U) == 0x0B000000U && ++ (or1 & 0xFFFFE000U) == 0x88112000U && ++ (or2 & 0xFFFFE000U) == 0x8A116000U && ++ sllx == 0x89293020U && ++ add == 0x8A010005U && ++ jmpl == 0x89C14000U && ++ nop == 0x01000000U) ++ { ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); ++ regs->u_regs[UREG_G4] <<= 32; ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); ++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4]; ++ regs->u_regs[UREG_G4] = addr + 24; ++ addr = regs->u_regs[UREG_G5]; ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 3; ++ } ++ } ++ 
} while (0); ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ do { /* PaX: unpatched PLT emulation step 2 */ ++ unsigned int save, call, nop; ++ ++ err = get_user(save, (unsigned int *)(regs->tpc-4)); ++ err |= get_user(call, (unsigned int *)regs->tpc); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+4)); ++ if (err) ++ break; ++ ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ dl_resolve &= 0xFFFFFFFFUL; ++ ++ regs->u_regs[UREG_RETPC] = regs->tpc; ++ regs->tpc = dl_resolve; ++ regs->tnpc = dl_resolve+4; ++ return 3; ++ } ++ } while (0); ++#endif ++ ++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */ ++ unsigned int sethi, ba, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(ba, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (ba & 0xFFF00000U) == 0x30600000U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ addr = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] = addr; ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 8; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) + { + struct mm_struct *mm = current->mm; +@@ -341,6 +804,29 @@ retry: + if (!vma) + goto bad_area; + ++#ifdef CONFIG_PAX_PAGEEXEC ++ /* PaX: detect ITLB misses on non-exec pages */ ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address && ++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB)) ++ { ++ if (address != regs->tpc) ++ goto good_area; ++ ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 2: ++ case 3: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS)); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + /* Pure DTLB misses do not tell us whether the fault causing + * load/store/atomic was a write or not, it only says that there + * was no match. 
So in such a case we (carefully) read the +diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c +index d2b5944..bd813f2 100644 +--- a/arch/sparc/mm/hugetlbpage.c ++++ b/arch/sparc/mm/hugetlbpage.c +@@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, + + info.flags = 0; + info.length = len; +- info.low_limit = TASK_UNMAPPED_BASE; ++ info.low_limit = mm->mmap_base; + info.high_limit = min(task_size, VA_EXCLUDE_START); + info.align_mask = PAGE_MASK & ~HPAGE_MASK; + info.align_offset = 0; +@@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, + if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { + VM_BUG_ON(addr != -ENOMEM); + info.low_limit = VA_EXCLUDE_END; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += mm->delta_mmap; ++#endif ++ + info.high_limit = task_size; + addr = vm_unmapped_area(&info); + } +@@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += mm->delta_mmap; ++#endif ++ + info.high_limit = STACK_TOP32; + addr = vm_unmapped_area(&info); + } +@@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long task_size = TASK_SIZE; ++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags); + + if (test_thread_flag(TIF_32BIT)) + task_size = STACK_TOP32; +@@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + return addr; + } + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + addr = ALIGN(addr, HPAGE_SIZE); + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) +diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c +index 83d89bc..37e7bc4 100644 +--- a/arch/sparc/mm/tlb.c ++++ b/arch/sparc/mm/tlb.c +@@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, + } + + if (!tb->active) { +- global_flush_tlb_page(mm, vaddr); + flush_tsb_user_page(mm, vaddr); ++ global_flush_tlb_page(mm, vaddr); + goto out; + } + +diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h +index f4500c6..889656c 100644 +--- a/arch/tile/include/asm/atomic_64.h ++++ b/arch/tile/include/asm/atomic_64.h +@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + /* Atomic dec and inc don't implement barrier, so provide them if needed. 
*/ + #define smp_mb__before_atomic_dec() smp_mb() + #define smp_mb__after_atomic_dec() smp_mb() +diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h +index a9a5299..0fce79e 100644 +--- a/arch/tile/include/asm/cache.h ++++ b/arch/tile/include/asm/cache.h +@@ -15,11 +15,12 @@ + #ifndef _ASM_TILE_CACHE_H + #define _ASM_TILE_CACHE_H + ++#include <linux/const.h> + #include <arch/chip.h> + + /* bytes per L1 data cache line */ + #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE() +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + /* bytes per L2 cache line */ + #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE() +diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h +index 9ab078a..d6635c2 100644 +--- a/arch/tile/include/asm/uaccess.h ++++ b/arch/tile/include/asm/uaccess.h +@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to, + const void __user *from, + unsigned long n) + { +- int sz = __compiletime_object_size(to); ++ size_t sz = __compiletime_object_size(to); + +- if (likely(sz == -1 || sz >= n)) ++ if (likely(sz == (size_t)-1 || sz >= n)) + n = _copy_from_user(to, from, n); + else + copy_from_user_overflow(); +diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c +index 650ccff..45fe2d6 100644 +--- a/arch/tile/mm/hugetlbpage.c ++++ b/arch/tile/mm/hugetlbpage.c +@@ -239,6 +239,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; ++ info.threadstack_offset = 0; + return vm_unmapped_area(&info); + } + +@@ -256,6 +257,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; ++ info.threadstack_offset = 0; + addr = vm_unmapped_area(&info); + + /* +diff --git a/arch/um/Makefile b/arch/um/Makefile +index 133f7de..1d6f2f1 100644 +--- a/arch/um/Makefile ++++ b/arch/um/Makefile +@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\ + $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \ + $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include + ++ifdef CONSTIFY_PLUGIN ++USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify ++endif ++ + #This will adjust *FLAGS accordingly to the platform. 
+ include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS) + +diff --git a/arch/um/defconfig b/arch/um/defconfig +index 08107a7..ab22afe 100644 +--- a/arch/um/defconfig ++++ b/arch/um/defconfig +@@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y + CONFIG_X86_L1_CACHE_SHIFT=5 + CONFIG_X86_XADD=y + CONFIG_X86_PPRO_FENCE=y +-CONFIG_X86_WP_WORKS_OK=y + CONFIG_X86_INVLPG=y + CONFIG_X86_BSWAP=y + CONFIG_X86_POPAD_OK=y +diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h +index 19e1bdd..3665b77 100644 +--- a/arch/um/include/asm/cache.h ++++ b/arch/um/include/asm/cache.h +@@ -1,6 +1,7 @@ + #ifndef __UM_CACHE_H + #define __UM_CACHE_H + ++#include <linux/const.h> + + #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT) + # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) +@@ -12,6 +13,6 @@ + # define L1_CACHE_SHIFT 5 + #endif + +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif +diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h +index 2e0a6b1..a64d0f5 100644 +--- a/arch/um/include/asm/kmap_types.h ++++ b/arch/um/include/asm/kmap_types.h +@@ -8,6 +8,6 @@ + + /* No more #include "asm/arch/kmap_types.h" ! */ + +-#define KM_TYPE_NR 14 ++#define KM_TYPE_NR 15 + + #endif +diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h +index 5ff53d9..5850cdf 100644 +--- a/arch/um/include/asm/page.h ++++ b/arch/um/include/asm/page.h +@@ -14,6 +14,9 @@ + #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) + #define PAGE_MASK (~(PAGE_SIZE-1)) + ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++ + #ifndef __ASSEMBLY__ + + struct page; +diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h +index 0032f92..cd151e0 100644 +--- a/arch/um/include/asm/pgtable-3level.h ++++ b/arch/um/include/asm/pgtable-3level.h +@@ -58,6 +58,7 @@ + #define pud_present(x) (pud_val(x) & _PAGE_PRESENT) + #define pud_populate(mm, pud, pmd) \ + set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd))) ++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd)) + + #ifdef CONFIG_64BIT + #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval)) +diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c +index b462b13..e7a19aa 100644 +--- a/arch/um/kernel/process.c ++++ b/arch/um/kernel/process.c +@@ -386,22 +386,6 @@ int singlestepping(void * t) + return 2; + } + +-/* +- * Only x86 and x86_64 have an arch_align_stack(). +- * All other arches have "#define arch_align_stack(x) (x)" +- * in their asm/system.h +- * As this is included in UML from asm-um/system-generic.h, +- * we can use it to behave as the subarch does. 
+- */ +-#ifndef arch_align_stack +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() % 8192; +- return sp & ~0xf; +-} +-#endif +- + unsigned long get_wchan(struct task_struct *p) + { + unsigned long stack_page, sp, ip; +diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h +index ad8f795..2c7eec6 100644 +--- a/arch/unicore32/include/asm/cache.h ++++ b/arch/unicore32/include/asm/cache.h +@@ -12,8 +12,10 @@ + #ifndef __UNICORE_CACHE_H__ + #define __UNICORE_CACHE_H__ + +-#define L1_CACHE_SHIFT (5) +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#include <linux/const.h> ++ ++#define L1_CACHE_SHIFT 5 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + /* + * Memory returned by kmalloc() may be used for DMA, so we must make +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index de80b33..c0f0899 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -243,7 +243,7 @@ config X86_HT + + config X86_32_LAZY_GS + def_bool y +- depends on X86_32 && !CC_STACKPROTECTOR ++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF + + config ARCH_HWEIGHT_CFLAGS + string +@@ -1076,6 +1076,7 @@ config MICROCODE_EARLY + + config X86_MSR + tristate "/dev/cpu/*/msr - Model-specific register support" ++ depends on !GRKERNSEC_KMEM + ---help--- + This device gives privileged processes access to the x86 + Model-Specific Registers (MSRs). It is a character device with +@@ -1099,7 +1100,7 @@ choice + + config NOHIGHMEM + bool "off" +- depends on !X86_NUMAQ ++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) + ---help--- + Linux can use up to 64 Gigabytes of physical memory on x86 systems. + However, the address space of 32-bit x86 processors is only 4 +@@ -1136,7 +1137,7 @@ config NOHIGHMEM + + config HIGHMEM4G + bool "4GB" +- depends on !X86_NUMAQ ++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) + ---help--- + Select this if you have a 32-bit processor and between 1 and 4 + gigabytes of physical RAM. +@@ -1189,7 +1190,7 @@ config PAGE_OFFSET + hex + default 0xB0000000 if VMSPLIT_3G_OPT + default 0x80000000 if VMSPLIT_2G +- default 0x78000000 if VMSPLIT_2G_OPT ++ default 0x70000000 if VMSPLIT_2G_OPT + default 0x40000000 if VMSPLIT_1G + default 0xC0000000 + depends on X86_32 +@@ -1587,6 +1588,7 @@ config SECCOMP + + config CC_STACKPROTECTOR + bool "Enable -fstack-protector buffer overflow detection" ++ depends on X86_64 || !PAX_MEMORY_UDEREF + ---help--- + This option turns on the -fstack-protector GCC feature. This + feature puts, at the beginning of functions, a canary value on +@@ -1706,6 +1708,8 @@ config X86_NEED_RELOCS + config PHYSICAL_ALIGN + hex "Alignment value to which kernel should be aligned" if X86_32 + default "0x1000000" ++ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE ++ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE + range 0x2000 0x1000000 + ---help--- + This value puts the alignment restrictions on physical address +@@ -1781,9 +1785,10 @@ config DEBUG_HOTPLUG_CPU0 + If unsure, say N. + + config COMPAT_VDSO +- def_bool y ++ def_bool n + prompt "Compat VDSO support" + depends on X86_32 || IA32_EMULATION ++ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF + ---help--- + Map the 32-bit VDSO to the predictable old-style address too. 
+ +diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu +index c026cca..14657ae 100644 +--- a/arch/x86/Kconfig.cpu ++++ b/arch/x86/Kconfig.cpu +@@ -319,7 +319,7 @@ config X86_PPRO_FENCE + + config X86_F00F_BUG + def_bool y +- depends on M586MMX || M586TSC || M586 || M486 ++ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC + + config X86_INVD_BUG + def_bool y +@@ -327,7 +327,7 @@ config X86_INVD_BUG + + config X86_ALIGNMENT_16 + def_bool y +- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 ++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 + + config X86_INTEL_USERCOPY + def_bool y +@@ -373,7 +373,7 @@ config X86_CMPXCHG64 + # generates cmov. + config X86_CMOV + def_bool y +- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) ++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) + + config X86_MINIMUM_CPU_FAMILY + int +diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug +index b322f12..652d0d9 100644 +--- a/arch/x86/Kconfig.debug ++++ b/arch/x86/Kconfig.debug +@@ -84,7 +84,7 @@ config X86_PTDUMP + config DEBUG_RODATA + bool "Write protect kernel read-only data structures" + default y +- depends on DEBUG_KERNEL ++ depends on DEBUG_KERNEL && BROKEN + ---help--- + Mark the kernel read-only data as write-protected in the pagetables, + in order to catch accidental (and incorrect) writes to such const +@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST + + config DEBUG_SET_MODULE_RONX + bool "Set loadable kernel module data as NX and text as RO" +- depends on MODULES ++ depends on MODULES && BROKEN + ---help--- + This option helps catch unintended modifications to loadable + kernel module's text and read-only data. It also prevents execution +@@ -294,7 +294,7 @@ config OPTIMIZE_INLINING + + config DEBUG_STRICT_USER_COPY_CHECKS + bool "Strict copy size checks" +- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING ++ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW + ---help--- + Enabling this option turns a certain set of sanity checks for user + copy operations into compile time failures. +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index 5c47726..8c4fa67 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -54,6 +54,7 @@ else + UTS_MACHINE := x86_64 + CHECKFLAGS += -D__x86_64__ -m64 + ++ biarch := $(call cc-option,-m64) + KBUILD_AFLAGS += -m64 + KBUILD_CFLAGS += -m64 + +@@ -234,3 +235,12 @@ define archhelp + echo ' FDARGS="..." arguments for the booted kernel' + echo ' FDINITRD=file initrd for the booted kernel' + endef ++ ++define OLD_LD ++ ++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils. 
++*** Please upgrade your binutils to 2.18 or newer ++endef ++ ++archprepare: ++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD))) +diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile +index 379814b..add62ce 100644 +--- a/arch/x86/boot/Makefile ++++ b/arch/x86/boot/Makefile +@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \ + $(call cc-option, -fno-stack-protector) \ + $(call cc-option, -mpreferred-stack-boundary=2) + KBUILD_CFLAGS += $(call cc-option, -m32) ++ifdef CONSTIFY_PLUGIN ++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify ++endif + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n + +diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h +index 878e4b9..20537ab 100644 +--- a/arch/x86/boot/bitops.h ++++ b/arch/x86/boot/bitops.h +@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr) + u8 v; + const u32 *p = (const u32 *)addr; + +- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); ++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); + return v; + } + +@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr) + + static inline void set_bit(int nr, void *addr) + { +- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); ++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); + } + + #endif /* BOOT_BITOPS_H */ +diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h +index 5b75319..331a4ca 100644 +--- a/arch/x86/boot/boot.h ++++ b/arch/x86/boot/boot.h +@@ -85,7 +85,7 @@ static inline void io_delay(void) + static inline u16 ds(void) + { + u16 seg; +- asm("movw %%ds,%0" : "=rm" (seg)); ++ asm volatile("movw %%ds,%0" : "=rm" (seg)); + return seg; + } + +@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr) + static inline int memcmp(const void *s1, const void *s2, size_t len) + { + u8 diff; +- asm("repe; cmpsb; setnz %0" ++ asm volatile("repe; cmpsb; setnz %0" + : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len)); + return diff; + } +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile +index 5ef205c..342191d 100644 +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small + KBUILD_CFLAGS += $(cflags-y) + KBUILD_CFLAGS += $(call cc-option,-ffreestanding) + KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) ++ifdef CONSTIFY_PLUGIN ++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify ++endif + + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n +diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c +index c205035..5853587 100644 +--- a/arch/x86/boot/compressed/eboot.c ++++ b/arch/x86/boot/compressed/eboot.c +@@ -150,7 +150,6 @@ again: + *addr = max_addr; + } + +-free_pool: + efi_call_phys1(sys_table->boottime->free_pool, map); + + fail: +@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align, + if (i == map_size / desc_size) + status = EFI_NOT_FOUND; + +-free_pool: + efi_call_phys1(sys_table->boottime->free_pool, map); + fail: + return status; +diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S +index a53440e..c3dbf1e 100644 +--- a/arch/x86/boot/compressed/efi_stub_32.S ++++ b/arch/x86/boot/compressed/efi_stub_32.S +@@ -46,16 +46,13 @@ ENTRY(efi_call_phys) + * parameter 2, ..., param n. To make things easy, we save the return + * address of efi_call_phys in a global variable. 
+ */ +- popl %ecx +- movl %ecx, saved_return_addr(%edx) +- /* get the function pointer into ECX*/ +- popl %ecx +- movl %ecx, efi_rt_function_ptr(%edx) ++ popl saved_return_addr(%edx) ++ popl efi_rt_function_ptr(%edx) + + /* + * 3. Call the physical function. + */ +- call *%ecx ++ call *efi_rt_function_ptr(%edx) + + /* + * 4. Balance the stack. And because EAX contain the return value, +@@ -67,15 +64,12 @@ ENTRY(efi_call_phys) + 1: popl %edx + subl $1b, %edx + +- movl efi_rt_function_ptr(%edx), %ecx +- pushl %ecx ++ pushl efi_rt_function_ptr(%edx) + + /* + * 10. Push the saved return address onto the stack and return. + */ +- movl saved_return_addr(%edx), %ecx +- pushl %ecx +- ret ++ jmpl *saved_return_addr(%edx) + ENDPROC(efi_call_phys) + .previous + +diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S +index 1e3184f..0d11e2e 100644 +--- a/arch/x86/boot/compressed/head_32.S ++++ b/arch/x86/boot/compressed/head_32.S +@@ -118,7 +118,7 @@ preferred_addr: + notl %eax + andl %eax, %ebx + #else +- movl $LOAD_PHYSICAL_ADDR, %ebx ++ movl $____LOAD_PHYSICAL_ADDR, %ebx + #endif + + /* Target address to relocate to for decompression */ +@@ -204,7 +204,7 @@ relocated: + * and where it was actually loaded. + */ + movl %ebp, %ebx +- subl $LOAD_PHYSICAL_ADDR, %ebx ++ subl $____LOAD_PHYSICAL_ADDR, %ebx + jz 2f /* Nothing to be done if loaded at compiled addr. */ + /* + * Process relocations. +@@ -212,8 +212,7 @@ relocated: + + 1: subl $4, %edi + movl (%edi), %ecx +- testl %ecx, %ecx +- jz 2f ++ jecxz 2f + addl %ebx, -__PAGE_OFFSET(%ebx, %ecx) + jmp 1b + 2: +diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S +index c1d383d..57ab51c 100644 +--- a/arch/x86/boot/compressed/head_64.S ++++ b/arch/x86/boot/compressed/head_64.S +@@ -97,7 +97,7 @@ ENTRY(startup_32) + notl %eax + andl %eax, %ebx + #else +- movl $LOAD_PHYSICAL_ADDR, %ebx ++ movl $____LOAD_PHYSICAL_ADDR, %ebx + #endif + + /* Target address to relocate to for decompression */ +@@ -272,7 +272,7 @@ preferred_addr: + notq %rax + andq %rax, %rbp + #else +- movq $LOAD_PHYSICAL_ADDR, %rbp ++ movq $____LOAD_PHYSICAL_ADDR, %rbp + #endif + + /* Target address to relocate to for decompression */ +@@ -363,8 +363,8 @@ gdt: + .long gdt + .word 0 + .quad 0x0000000000000000 /* NULL descriptor */ +- .quad 0x00af9a000000ffff /* __KERNEL_CS */ +- .quad 0x00cf92000000ffff /* __KERNEL_DS */ ++ .quad 0x00af9b000000ffff /* __KERNEL_CS */ ++ .quad 0x00cf93000000ffff /* __KERNEL_DS */ + .quad 0x0080890000000000 /* TS descriptor */ + .quad 0x0000000000000000 /* TS continued */ + gdt_end: +diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c +index 7cb56c6..d382d84 100644 +--- a/arch/x86/boot/compressed/misc.c ++++ b/arch/x86/boot/compressed/misc.c +@@ -303,7 +303,7 @@ static void parse_elf(void *output) + case PT_LOAD: + #ifdef CONFIG_RELOCATABLE + dest = output; +- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); ++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR); + #else + dest = (void *)(phdr->p_paddr); + #endif +@@ -354,7 +354,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, + error("Destination address too large"); + #endif + #ifndef CONFIG_RELOCATABLE +- if ((unsigned long)output != LOAD_PHYSICAL_ADDR) ++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR) + error("Wrong destination address"); + #endif + +diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c +index 4d3ff03..e4972ff 100644 +--- a/arch/x86/boot/cpucheck.c ++++ b/arch/x86/boot/cpucheck.c 
+@@ -74,7 +74,7 @@ static int has_fpu(void) + u16 fcw = -1, fsw = -1; + u32 cr0; + +- asm("movl %%cr0,%0" : "=r" (cr0)); ++ asm volatile("movl %%cr0,%0" : "=r" (cr0)); + if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { + cr0 &= ~(X86_CR0_EM|X86_CR0_TS); + asm volatile("movl %0,%%cr0" : : "r" (cr0)); +@@ -90,7 +90,7 @@ static int has_eflag(u32 mask) + { + u32 f0, f1; + +- asm("pushfl ; " ++ asm volatile("pushfl ; " + "pushfl ; " + "popl %0 ; " + "movl %0,%1 ; " +@@ -115,7 +115,7 @@ static void get_flags(void) + set_bit(X86_FEATURE_FPU, cpu.flags); + + if (has_eflag(X86_EFLAGS_ID)) { +- asm("cpuid" ++ asm volatile("cpuid" + : "=a" (max_intel_level), + "=b" (cpu_vendor[0]), + "=d" (cpu_vendor[1]), +@@ -124,7 +124,7 @@ static void get_flags(void) + + if (max_intel_level >= 0x00000001 && + max_intel_level <= 0x0000ffff) { +- asm("cpuid" ++ asm volatile("cpuid" + : "=a" (tfms), + "=c" (cpu.flags[4]), + "=d" (cpu.flags[0]) +@@ -136,7 +136,7 @@ static void get_flags(void) + cpu.model += ((tfms >> 16) & 0xf) << 4; + } + +- asm("cpuid" ++ asm volatile("cpuid" + : "=a" (max_amd_level) + : "a" (0x80000000) + : "ebx", "ecx", "edx"); +@@ -144,7 +144,7 @@ static void get_flags(void) + if (max_amd_level >= 0x80000001 && + max_amd_level <= 0x8000ffff) { + u32 eax = 0x80000001; +- asm("cpuid" ++ asm volatile("cpuid" + : "+a" (eax), + "=c" (cpu.flags[6]), + "=d" (cpu.flags[1]) +@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) + u32 ecx = MSR_K7_HWCR; + u32 eax, edx; + +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + eax &= ~(1 << 15); +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + + get_flags(); /* Make sure it really did something */ + err = check_flags(); +@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) + u32 ecx = MSR_VIA_FCR; + u32 eax, edx; + +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + eax |= (1<<1)|(1<<7); +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + + set_bit(X86_FEATURE_CX8, cpu.flags); + err = check_flags(); +@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) + u32 eax, edx; + u32 level = 1; + +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); +- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); +- asm("cpuid" ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); ++ asm volatile("cpuid" + : "+a" (level), "=d" (cpu.flags[0]) + : : "ecx", "ebx"); +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + + err = check_flags(); + } +diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S +index 9ec06a1..2c25e79 100644 +--- a/arch/x86/boot/header.S ++++ b/arch/x86/boot/header.S +@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to + # single linked list of + # struct setup_data + +-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr ++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr + + #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset) ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR) ++#else + #define 
VO_INIT_SIZE (VO__end - VO__text) ++#endif + #if ZO_INIT_SIZE > VO_INIT_SIZE + #define INIT_SIZE ZO_INIT_SIZE + #else +diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c +index db75d07..8e6d0af 100644 +--- a/arch/x86/boot/memory.c ++++ b/arch/x86/boot/memory.c +@@ -19,7 +19,7 @@ + + static int detect_memory_e820(void) + { +- int count = 0; ++ unsigned int count = 0; + struct biosregs ireg, oreg; + struct e820entry *desc = boot_params.e820_map; + static struct e820entry buf; /* static so it is zeroed */ +diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c +index 11e8c6e..fdbb1ed 100644 +--- a/arch/x86/boot/video-vesa.c ++++ b/arch/x86/boot/video-vesa.c +@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void) + + boot_params.screen_info.vesapm_seg = oreg.es; + boot_params.screen_info.vesapm_off = oreg.di; ++ boot_params.screen_info.vesapm_size = oreg.cx; + } + + /* +diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c +index 43eda28..5ab5fdb 100644 +--- a/arch/x86/boot/video.c ++++ b/arch/x86/boot/video.c +@@ -96,7 +96,7 @@ static void store_mode_params(void) + static unsigned int get_entry(void) + { + char entry_buf[4]; +- int i, len = 0; ++ unsigned int i, len = 0; + int key; + unsigned int v; + +diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S +index 9105655..5e37f27 100644 +--- a/arch/x86/crypto/aes-x86_64-asm_64.S ++++ b/arch/x86/crypto/aes-x86_64-asm_64.S +@@ -8,6 +8,8 @@ + * including this sentence is retained in full. + */ + ++#include <asm/alternative-asm.h> ++ + .extern crypto_ft_tab + .extern crypto_it_tab + .extern crypto_fl_tab +@@ -70,6 +72,8 @@ + je B192; \ + leaq 32(r9),r9; + ++#define ret pax_force_retaddr 0, 1; ret ++ + #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \ + movq r1,r2; \ + movq r3,r4; \ +diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S +index 04b7977..402f223 100644 +--- a/arch/x86/crypto/aesni-intel_asm.S ++++ b/arch/x86/crypto/aesni-intel_asm.S +@@ -31,6 +31,7 @@ + + #include <linux/linkage.h> + #include <asm/inst.h> ++#include <asm/alternative-asm.h> + + #ifdef __x86_64__ + .data +@@ -1435,6 +1436,7 @@ _return_T_done_decrypt: + pop %r14 + pop %r13 + pop %r12 ++ pax_force_retaddr 0, 1 + ret + ENDPROC(aesni_gcm_dec) + +@@ -1699,6 +1701,7 @@ _return_T_done_encrypt: + pop %r14 + pop %r13 + pop %r12 ++ pax_force_retaddr 0, 1 + ret + ENDPROC(aesni_gcm_enc) + +@@ -1716,6 +1719,7 @@ _key_expansion_256a: + pxor %xmm1, %xmm0 + movaps %xmm0, (TKEYP) + add $0x10, TKEYP ++ pax_force_retaddr_bts + ret + ENDPROC(_key_expansion_128) + ENDPROC(_key_expansion_256a) +@@ -1742,6 +1746,7 @@ _key_expansion_192a: + shufps $0b01001110, %xmm2, %xmm1 + movaps %xmm1, 0x10(TKEYP) + add $0x20, TKEYP ++ pax_force_retaddr_bts + ret + ENDPROC(_key_expansion_192a) + +@@ -1762,6 +1767,7 @@ _key_expansion_192b: + + movaps %xmm0, (TKEYP) + add $0x10, TKEYP ++ pax_force_retaddr_bts + ret + ENDPROC(_key_expansion_192b) + +@@ -1775,6 +1781,7 @@ _key_expansion_256b: + pxor %xmm1, %xmm2 + movaps %xmm2, (TKEYP) + add $0x10, TKEYP ++ pax_force_retaddr_bts + ret + ENDPROC(_key_expansion_256b) + +@@ -1888,6 +1895,7 @@ ENTRY(aesni_set_key) + #ifndef __x86_64__ + popl KEYP + #endif ++ pax_force_retaddr 0, 1 + ret + ENDPROC(aesni_set_key) + +@@ -1910,6 +1918,7 @@ ENTRY(aesni_enc) + popl KLEN + popl KEYP + #endif ++ pax_force_retaddr 0, 1 + ret + ENDPROC(aesni_enc) + +@@ -1968,6 +1977,7 @@ _aesni_enc1: + AESENC KEY STATE + movaps 0x70(TKEYP), KEY + AESENCLAST KEY STATE ++ pax_force_retaddr_bts + ret 
+ ENDPROC(_aesni_enc1) + +@@ -2077,6 +2087,7 @@ _aesni_enc4: + AESENCLAST KEY STATE2 + AESENCLAST KEY STATE3 + AESENCLAST KEY STATE4 ++ pax_force_retaddr_bts + ret + ENDPROC(_aesni_enc4) + +@@ -2100,6 +2111,7 @@ ENTRY(aesni_dec) + popl KLEN + popl KEYP + #endif ++ pax_force_retaddr 0, 1 + ret + ENDPROC(aesni_dec) + +@@ -2158,6 +2170,7 @@ _aesni_dec1: + AESDEC KEY STATE + movaps 0x70(TKEYP), KEY + AESDECLAST KEY STATE ++ pax_force_retaddr_bts + ret + ENDPROC(_aesni_dec1) + +@@ -2267,6 +2280,7 @@ _aesni_dec4: + AESDECLAST KEY STATE2 + AESDECLAST KEY STATE3 + AESDECLAST KEY STATE4 ++ pax_force_retaddr_bts + ret + ENDPROC(_aesni_dec4) + +@@ -2325,6 +2339,7 @@ ENTRY(aesni_ecb_enc) + popl KEYP + popl LEN + #endif ++ pax_force_retaddr 0, 1 + ret + ENDPROC(aesni_ecb_enc) + +@@ -2384,6 +2399,7 @@ ENTRY(aesni_ecb_dec) + popl KEYP + popl LEN + #endif ++ pax_force_retaddr 0, 1 + ret + ENDPROC(aesni_ecb_dec) + +@@ -2426,6 +2442,7 @@ ENTRY(aesni_cbc_enc) + popl LEN + popl IVP + #endif ++ pax_force_retaddr 0, 1 + ret + ENDPROC(aesni_cbc_enc) + +@@ -2517,6 +2534,7 @@ ENTRY(aesni_cbc_dec) + popl LEN + popl IVP + #endif ++ pax_force_retaddr 0, 1 + ret + ENDPROC(aesni_cbc_dec) + +@@ -2544,6 +2562,7 @@ _aesni_inc_init: + mov $1, TCTR_LOW + MOVQ_R64_XMM TCTR_LOW INC + MOVQ_R64_XMM CTR TCTR_LOW ++ pax_force_retaddr_bts + ret + ENDPROC(_aesni_inc_init) + +@@ -2573,6 +2592,7 @@ _aesni_inc: + .Linc_low: + movaps CTR, IV + PSHUFB_XMM BSWAP_MASK IV ++ pax_force_retaddr_bts + ret + ENDPROC(_aesni_inc) + +@@ -2634,6 +2654,7 @@ ENTRY(aesni_ctr_enc) + .Lctr_enc_ret: + movups IV, (IVP) + .Lctr_enc_just_ret: ++ pax_force_retaddr 0, 1 + ret + ENDPROC(aesni_ctr_enc) + #endif +diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S +index 246c670..4d1ed00 100644 +--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S ++++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S +@@ -21,6 +21,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .file "blowfish-x86_64-asm.S" + .text +@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk) + jnz .L__enc_xor; + + write_block(); ++ pax_force_retaddr 0, 1 + ret; + .L__enc_xor: + xor_block(); ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(__blowfish_enc_blk) + +@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk) + + movq %r11, %rbp; + ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(blowfish_dec_blk) + +@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way) + + popq %rbx; + popq %rbp; ++ pax_force_retaddr 0, 1 + ret; + + .L__enc_xor4: +@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way) + + popq %rbx; + popq %rbp; ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(__blowfish_enc_blk_4way) + +@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way) + popq %rbx; + popq %rbp; + ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(blowfish_dec_blk_4way) +diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S +index 310319c..ce174a4 100644 +--- a/arch/x86/crypto/camellia-x86_64-asm_64.S ++++ b/arch/x86/crypto/camellia-x86_64-asm_64.S +@@ -21,6 +21,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .file "camellia-x86_64-asm_64.S" + .text +@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk) + enc_outunpack(mov, RT1); + + movq RRBP, %rbp; ++ pax_force_retaddr 0, 1 + ret; + + .L__enc_xor: + enc_outunpack(xor, RT1); + + movq RRBP, %rbp; ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(__camellia_enc_blk) + +@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk) + dec_outunpack(); + + movq RRBP, %rbp; ++ pax_force_retaddr 0, 1 + ret; + 
ENDPROC(camellia_dec_blk) + +@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way) + + movq RRBP, %rbp; + popq %rbx; ++ pax_force_retaddr 0, 1 + ret; + + .L__enc2_xor: +@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way) + + movq RRBP, %rbp; + popq %rbx; ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(__camellia_enc_blk_2way) + +@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way) + + movq RRBP, %rbp; + movq RXOR, %rbx; ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(camellia_dec_blk_2way) +diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +index c35fd5d..c1ee236 100644 +--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S ++++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +@@ -24,6 +24,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .file "cast5-avx-x86_64-asm_64.S" + +@@ -281,6 +282,7 @@ __cast5_enc_blk16: + outunpack_blocks(RR3, RL3, RTMP, RX, RKM); + outunpack_blocks(RR4, RL4, RTMP, RX, RKM); + ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(__cast5_enc_blk16) + +@@ -352,6 +354,7 @@ __cast5_dec_blk16: + outunpack_blocks(RR3, RL3, RTMP, RX, RKM); + outunpack_blocks(RR4, RL4, RTMP, RX, RKM); + ++ pax_force_retaddr 0, 1 + ret; + + .L__skip_dec: +@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way) + vmovdqu RR4, (6*4*4)(%r11); + vmovdqu RL4, (7*4*4)(%r11); + ++ pax_force_retaddr + ret; + ENDPROC(cast5_ecb_enc_16way) + +@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way) + vmovdqu RR4, (6*4*4)(%r11); + vmovdqu RL4, (7*4*4)(%r11); + ++ pax_force_retaddr + ret; + ENDPROC(cast5_ecb_dec_16way) + +@@ -469,6 +474,7 @@ ENTRY(cast5_cbc_dec_16way) + + popq %r12; + ++ pax_force_retaddr + ret; + ENDPROC(cast5_cbc_dec_16way) + +@@ -542,5 +548,6 @@ ENTRY(cast5_ctr_16way) + + popq %r12; + ++ pax_force_retaddr + ret; + ENDPROC(cast5_ctr_16way) +diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +index f93b610..c09bf40 100644 +--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S ++++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +@@ -24,6 +24,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + #include "glue_helper-asm-avx.S" + + .file "cast6-avx-x86_64-asm_64.S" +@@ -293,6 +294,7 @@ __cast6_enc_blk8: + outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); + outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); + ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(__cast6_enc_blk8) + +@@ -338,6 +340,7 @@ __cast6_dec_blk8: + outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); + outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); + ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(__cast6_dec_blk8) + +@@ -356,6 +359,7 @@ ENTRY(cast6_ecb_enc_8way) + + store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(cast6_ecb_enc_8way) + +@@ -374,6 +378,7 @@ ENTRY(cast6_ecb_dec_8way) + + store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(cast6_ecb_dec_8way) + +@@ -397,6 +402,7 @@ ENTRY(cast6_cbc_dec_8way) + + popq %r12; + ++ pax_force_retaddr + ret; + ENDPROC(cast6_cbc_dec_8way) + +@@ -422,5 +428,6 @@ ENTRY(cast6_ctr_8way) + + popq %r12; + ++ pax_force_retaddr + ret; + ENDPROC(cast6_ctr_8way) +diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S +index 9279e0b..9270820 100644 +--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S ++++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S +@@ -1,4 +1,5 @@ + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + # enter 
salsa20_encrypt_bytes + ENTRY(salsa20_encrypt_bytes) +@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes) + add %r11,%rsp + mov %rdi,%rax + mov %rsi,%rdx ++ pax_force_retaddr 0, 1 + ret + # bytesatleast65: + ._bytesatleast65: +@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup) + add %r11,%rsp + mov %rdi,%rax + mov %rsi,%rdx ++ pax_force_retaddr + ret + ENDPROC(salsa20_keysetup) + +@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup) + add %r11,%rsp + mov %rdi,%rax + mov %rsi,%rdx ++ pax_force_retaddr + ret + ENDPROC(salsa20_ivsetup) +diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +index 43c9386..a0e2d60 100644 +--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S ++++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +@@ -25,6 +25,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + #include "glue_helper-asm-avx.S" + + .file "serpent-avx-x86_64-asm_64.S" +@@ -617,6 +618,7 @@ __serpent_enc_blk8_avx: + write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); + write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); + ++ pax_force_retaddr + ret; + ENDPROC(__serpent_enc_blk8_avx) + +@@ -671,6 +673,7 @@ __serpent_dec_blk8_avx: + write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); + write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); + ++ pax_force_retaddr + ret; + ENDPROC(__serpent_dec_blk8_avx) + +@@ -687,6 +690,7 @@ ENTRY(serpent_ecb_enc_8way_avx) + + store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(serpent_ecb_enc_8way_avx) + +@@ -703,6 +707,7 @@ ENTRY(serpent_ecb_dec_8way_avx) + + store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); + ++ pax_force_retaddr + ret; + ENDPROC(serpent_ecb_dec_8way_avx) + +@@ -719,6 +724,7 @@ ENTRY(serpent_cbc_dec_8way_avx) + + store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); + ++ pax_force_retaddr + ret; + ENDPROC(serpent_cbc_dec_8way_avx) + +@@ -737,5 +743,6 @@ ENTRY(serpent_ctr_8way_avx) + + store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(serpent_ctr_8way_avx) +diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S +index acc066c..1559cc4 100644 +--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S ++++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S +@@ -25,6 +25,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .file "serpent-sse2-x86_64-asm_64.S" + .text +@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way) + write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); + write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); + ++ pax_force_retaddr + ret; + + .L__enc_xor8: + xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); + xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); + ++ pax_force_retaddr + ret; + ENDPROC(__serpent_enc_blk_8way) + +@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way) + write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2); + write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); + ++ pax_force_retaddr + ret; + ENDPROC(serpent_dec_blk_8way) +diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S +index a410950..3356d42 100644 +--- a/arch/x86/crypto/sha1_ssse3_asm.S ++++ b/arch/x86/crypto/sha1_ssse3_asm.S +@@ -29,6 +29,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + #define CTX %rdi // arg1 + #define BUF %rsi // arg2 +@@ -104,6 +105,7 @@ + pop %r12 + pop %rbp + pop %rbx ++ pax_force_retaddr 0, 1 + ret + + ENDPROC(\name) +diff --git 
a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +index 8d3e113..898b161 100644 +--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S ++++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +@@ -24,6 +24,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + #include "glue_helper-asm-avx.S" + + .file "twofish-avx-x86_64-asm_64.S" +@@ -282,6 +283,7 @@ __twofish_enc_blk8: + outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); + outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); + ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(__twofish_enc_blk8) + +@@ -322,6 +324,7 @@ __twofish_dec_blk8: + outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); + outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); + ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(__twofish_dec_blk8) + +@@ -340,6 +343,7 @@ ENTRY(twofish_ecb_enc_8way) + + store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); + ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(twofish_ecb_enc_8way) + +@@ -358,6 +362,7 @@ ENTRY(twofish_ecb_dec_8way) + + store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(twofish_ecb_dec_8way) + +@@ -381,6 +386,7 @@ ENTRY(twofish_cbc_dec_8way) + + popq %r12; + ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(twofish_cbc_dec_8way) + +@@ -406,5 +412,6 @@ ENTRY(twofish_ctr_8way) + + popq %r12; + ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(twofish_ctr_8way) +diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +index 1c3b7ce..b365c5e 100644 +--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S ++++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +@@ -21,6 +21,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .file "twofish-x86_64-asm-3way.S" + .text +@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way) + popq %r13; + popq %r14; + popq %r15; ++ pax_force_retaddr 0, 1 + ret; + + .L__enc_xor3: +@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way) + popq %r13; + popq %r14; + popq %r15; ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(__twofish_enc_blk_3way) + +@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way) + popq %r13; + popq %r14; + popq %r15; ++ pax_force_retaddr 0, 1 + ret; + ENDPROC(twofish_dec_blk_3way) +diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S +index a039d21..29e7615 100644 +--- a/arch/x86/crypto/twofish-x86_64-asm_64.S ++++ b/arch/x86/crypto/twofish-x86_64-asm_64.S +@@ -22,6 +22,7 @@ + + #include <linux/linkage.h> + #include <asm/asm-offsets.h> ++#include <asm/alternative-asm.h> + + #define a_offset 0 + #define b_offset 4 +@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk) + + popq R1 + movq $1,%rax ++ pax_force_retaddr 0, 1 + ret + ENDPROC(twofish_enc_blk) + +@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk) + + popq R1 + movq $1,%rax ++ pax_force_retaddr 0, 1 + ret + ENDPROC(twofish_dec_blk) +diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c +index 03abf9b..a42ba29 100644 +--- a/arch/x86/ia32/ia32_aout.c ++++ b/arch/x86/ia32/ia32_aout.c +@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, + unsigned long dump_start, dump_size; + struct user32 dump; + ++ memset(&dump, 0, sizeof(dump)); ++ + fs = get_fs(); + set_fs(KERNEL_DS); + has_dumped = 1; +diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c +index cf1a471..3bc4cf8 100644 +--- a/arch/x86/ia32/ia32_signal.c ++++ b/arch/x86/ia32/ia32_signal.c +@@ -340,7 +340,7 @@ 
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, + sp -= frame_size; + /* Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. */ +- sp = ((sp + 4) & -16ul) - 4; ++ sp = ((sp - 12) & -16ul) - 4; + return (void __user *) sp; + } + +@@ -398,7 +398,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig, + * These are actually not used anymore, but left because some + * gdb versions depend on them as a marker. + */ +- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); ++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode); + } put_user_catch(err); + + if (err) +@@ -440,7 +440,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig, + 0xb8, + __NR_ia32_rt_sigreturn, + 0x80cd, +- 0, ++ 0 + }; + + frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate); +@@ -463,16 +463,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig, + + if (ksig->ka.sa.sa_flags & SA_RESTORER) + restorer = ksig->ka.sa.sa_restorer; ++ else if (current->mm->context.vdso) ++ /* Return stub is in 32bit vsyscall page */ ++ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); + else +- restorer = VDSO32_SYMBOL(current->mm->context.vdso, +- rt_sigreturn); ++ restorer = &frame->retcode; + put_user_ex(ptr_to_compat(restorer), &frame->pretcode); + + /* + * Not actually used anymore, but left because some gdb + * versions need it. + */ +- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); ++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode); + } put_user_catch(err); + + err |= copy_siginfo_to_user32(&frame->info, &ksig->info); +diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S +index 474dc1b..24aaa3e 100644 +--- a/arch/x86/ia32/ia32entry.S ++++ b/arch/x86/ia32/ia32entry.S +@@ -15,8 +15,10 @@ + #include <asm/irqflags.h> + #include <asm/asm.h> + #include <asm/smap.h> ++#include <asm/pgtable.h> + #include <linux/linkage.h> + #include <linux/err.h> ++#include <asm/alternative-asm.h> + + /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ + #include <linux/elf-em.h> +@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit) + ENDPROC(native_irq_enable_sysexit) + #endif + ++ .macro pax_enter_kernel_user ++ pax_set_fptr_mask ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_enter_kernel_user ++#endif ++ .endm ++ ++ .macro pax_exit_kernel_user ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_exit_kernel_user ++#endif ++#ifdef CONFIG_PAX_RANDKSTACK ++ pushq %rax ++ pushq %r11 ++ call pax_randomize_kstack ++ popq %r11 ++ popq %rax ++#endif ++ .endm ++ ++ .macro pax_erase_kstack ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++ call pax_erase_kstack ++#endif ++ .endm ++ + /* + * 32bit SYSENTER instruction entry. 
+ * +@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target) + CFI_REGISTER rsp,rbp + SWAPGS_UNSAFE_STACK + movq PER_CPU_VAR(kernel_stack), %rsp +- addq $(KERNEL_STACK_OFFSET),%rsp +- /* +- * No need to follow this irqs on/off section: the syscall +- * disabled irqs, here we enable it straight after entry: +- */ +- ENABLE_INTERRUPTS(CLBR_NONE) + movl %ebp,%ebp /* zero extension */ + pushq_cfi $__USER32_DS + /*CFI_REL_OFFSET ss,0*/ +@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target) + CFI_REL_OFFSET rsp,0 + pushfq_cfi + /*CFI_REL_OFFSET rflags,0*/ +- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d +- CFI_REGISTER rip,r10 ++ orl $X86_EFLAGS_IF,(%rsp) ++ GET_THREAD_INFO(%r11) ++ movl TI_sysenter_return(%r11), %r11d ++ CFI_REGISTER rip,r11 + pushq_cfi $__USER32_CS + /*CFI_REL_OFFSET cs,0*/ + movl %eax, %eax +- pushq_cfi %r10 ++ pushq_cfi %r11 + CFI_REL_OFFSET rip,0 + pushq_cfi %rax + cld + SAVE_ARGS 0,1,0 ++ pax_enter_kernel_user ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pax_erase_kstack ++#endif ++ ++ /* ++ * No need to follow this irqs on/off section: the syscall ++ * disabled irqs, here we enable it straight after entry: ++ */ ++ ENABLE_INTERRUPTS(CLBR_NONE) + /* no need to do an access_ok check here because rbp has been + 32bit zero extended */ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov pax_user_shadow_base,%r11 ++ add %r11,%rbp ++#endif ++ + ASM_STAC + 1: movl (%rbp),%ebp + _ASM_EXTABLE(1b,ia32_badarg) + ASM_CLAC +- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) +- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ GET_THREAD_INFO(%r11) ++ orl $TS_COMPAT,TI_status(%r11) ++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) + CFI_REMEMBER_STATE + jnz sysenter_tracesys + cmpq $(IA32_NR_syscalls-1),%rax +@@ -162,12 +204,15 @@ sysenter_do_call: + sysenter_dispatch: + call *ia32_sys_call_table(,%rax,8) + movq %rax,RAX-ARGOFFSET(%rsp) ++ GET_THREAD_INFO(%r11) + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF +- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11) + jnz sysexit_audit + sysexit_from_sys_call: +- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ pax_exit_kernel_user ++ pax_erase_kstack ++ andl $~TS_COMPAT,TI_status(%r11) + /* clear IF, that popfq doesn't enable interrupts early */ + andl $~0x200,EFLAGS-R11(%rsp) + movl RIP-R11(%rsp),%edx /* User %eip */ +@@ -193,6 +238,9 @@ sysexit_from_sys_call: + movl %eax,%esi /* 2nd arg: syscall number */ + movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ + call __audit_syscall_entry ++ ++ pax_erase_kstack ++ + movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ + cmpq $(IA32_NR_syscalls-1),%rax + ja ia32_badsys +@@ -204,7 +252,7 @@ sysexit_from_sys_call: + .endm + + .macro auditsys_exit exit +- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) + jnz ia32_ret_from_sys_call + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) +@@ -215,11 +263,12 @@ sysexit_from_sys_call: + 1: setbe %al /* 1 if error, 0 if not */ + movzbl %al,%edi /* zero-extend that into %edi */ + call __audit_syscall_exit ++ GET_THREAD_INFO(%r11) + movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ + movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF +- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl %edi,TI_flags(%r11) + jz \exit + CLEAR_RREGS -ARGOFFSET + jmp int_with_check +@@ -237,7 
+286,7 @@ sysexit_audit: + + sysenter_tracesys: + #ifdef CONFIG_AUDITSYSCALL +- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) + jz sysenter_auditsys + #endif + SAVE_REST +@@ -249,6 +298,9 @@ sysenter_tracesys: + RESTORE_REST + cmpq $(IA32_NR_syscalls-1),%rax + ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ ++ ++ pax_erase_kstack ++ + jmp sysenter_do_call + CFI_ENDPROC + ENDPROC(ia32_sysenter_target) +@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target) + ENTRY(ia32_cstar_target) + CFI_STARTPROC32 simple + CFI_SIGNAL_FRAME +- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET ++ CFI_DEF_CFA rsp,0 + CFI_REGISTER rip,rcx + /*CFI_REGISTER rflags,r11*/ + SWAPGS_UNSAFE_STACK + movl %esp,%r8d + CFI_REGISTER rsp,r8 + movq PER_CPU_VAR(kernel_stack),%rsp ++ SAVE_ARGS 8*6,0,0 ++ pax_enter_kernel_user ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pax_erase_kstack ++#endif ++ + /* + * No need to follow this irqs on/off section: the syscall + * disabled irqs and here we enable it straight after entry: + */ + ENABLE_INTERRUPTS(CLBR_NONE) +- SAVE_ARGS 8,0,0 + movl %eax,%eax /* zero extension */ + movq %rax,ORIG_RAX-ARGOFFSET(%rsp) + movq %rcx,RIP-ARGOFFSET(%rsp) +@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target) + /* no need to do an access_ok check here because r8 has been + 32bit zero extended */ + /* hardware stack frame is complete now */ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov pax_user_shadow_base,%r11 ++ add %r11,%r8 ++#endif ++ + ASM_STAC + 1: movl (%r8),%r9d + _ASM_EXTABLE(1b,ia32_badarg) + ASM_CLAC +- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) +- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ GET_THREAD_INFO(%r11) ++ orl $TS_COMPAT,TI_status(%r11) ++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) + CFI_REMEMBER_STATE + jnz cstar_tracesys + cmpq $IA32_NR_syscalls-1,%rax +@@ -319,12 +384,15 @@ cstar_do_call: + cstar_dispatch: + call *ia32_sys_call_table(,%rax,8) + movq %rax,RAX-ARGOFFSET(%rsp) ++ GET_THREAD_INFO(%r11) + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF +- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11) + jnz sysretl_audit + sysretl_from_sys_call: +- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ pax_exit_kernel_user ++ pax_erase_kstack ++ andl $~TS_COMPAT,TI_status(%r11) + RESTORE_ARGS 0,-ARG_SKIP,0,0,0 + movl RIP-ARGOFFSET(%rsp),%ecx + CFI_REGISTER rip,rcx +@@ -352,7 +420,7 @@ sysretl_audit: + + cstar_tracesys: + #ifdef CONFIG_AUDITSYSCALL +- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) + jz cstar_auditsys + #endif + xchgl %r9d,%ebp +@@ -366,6 +434,9 @@ cstar_tracesys: + xchgl %ebp,%r9d + cmpq $(IA32_NR_syscalls-1),%rax + ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ ++ ++ pax_erase_kstack ++ + jmp cstar_do_call + END(ia32_cstar_target) + +@@ -407,19 +478,26 @@ ENTRY(ia32_syscall) + CFI_REL_OFFSET rip,RIP-RIP + PARAVIRT_ADJUST_EXCEPTION_FRAME + SWAPGS +- /* +- * No need to follow this irqs on/off section: the syscall +- * disabled irqs and here we enable it straight after entry: +- */ +- ENABLE_INTERRUPTS(CLBR_NONE) + movl %eax,%eax + pushq_cfi %rax + cld + /* note the registers are not zero extended to the sf. + this could be a problem. 
*/ + SAVE_ARGS 0,1,0 +- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) +- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ pax_enter_kernel_user ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pax_erase_kstack ++#endif ++ ++ /* ++ * No need to follow this irqs on/off section: the syscall ++ * disabled irqs and here we enable it straight after entry: ++ */ ++ ENABLE_INTERRUPTS(CLBR_NONE) ++ GET_THREAD_INFO(%r11) ++ orl $TS_COMPAT,TI_status(%r11) ++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) + jnz ia32_tracesys + cmpq $(IA32_NR_syscalls-1),%rax + ja ia32_badsys +@@ -442,6 +520,9 @@ ia32_tracesys: + RESTORE_REST + cmpq $(IA32_NR_syscalls-1),%rax + ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ ++ ++ pax_erase_kstack ++ + jmp ia32_do_call + END(ia32_syscall) + +diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c +index ad7a20c..1ffa3c1 100644 +--- a/arch/x86/ia32/sys_ia32.c ++++ b/arch/x86/ia32/sys_ia32.c +@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low, + */ + static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat) + { +- typeof(ubuf->st_uid) uid = 0; +- typeof(ubuf->st_gid) gid = 0; ++ typeof(((struct stat64 *)0)->st_uid) uid = 0; ++ typeof(((struct stat64 *)0)->st_gid) gid = 0; + SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid)); + SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid)); + if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) || +@@ -205,7 +205,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, + return -EFAULT; + + set_fs(KERNEL_DS); +- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, ++ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL, + count); + set_fs(old_fs); + +diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h +index 372231c..a5aa1a1 100644 +--- a/arch/x86/include/asm/alternative-asm.h ++++ b/arch/x86/include/asm/alternative-asm.h +@@ -18,6 +18,45 @@ + .endm + #endif + ++#ifdef KERNEXEC_PLUGIN ++ .macro pax_force_retaddr_bts rip=0 ++ btsq $63,\rip(%rsp) ++ .endm ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS ++ .macro pax_force_retaddr rip=0, reload=0 ++ btsq $63,\rip(%rsp) ++ .endm ++ .macro pax_force_fptr ptr ++ btsq $63,\ptr ++ .endm ++ .macro pax_set_fptr_mask ++ .endm ++#endif ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR ++ .macro pax_force_retaddr rip=0, reload=0 ++ .if \reload ++ pax_set_fptr_mask ++ .endif ++ orq %r10,\rip(%rsp) ++ .endm ++ .macro pax_force_fptr ptr ++ orq %r10,\ptr ++ .endm ++ .macro pax_set_fptr_mask ++ movabs $0x8000000000000000,%r10 ++ .endm ++#endif ++#else ++ .macro pax_force_retaddr rip=0, reload=0 ++ .endm ++ .macro pax_force_fptr ptr ++ .endm ++ .macro pax_force_retaddr_bts rip=0 ++ .endm ++ .macro pax_set_fptr_mask ++ .endm ++#endif ++ + .macro altinstruction_entry orig alt feature orig_len alt_len + .long \orig - . + .long \alt - . 
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h +index 58ed6d9..f1cbe58 100644 +--- a/arch/x86/include/asm/alternative.h ++++ b/arch/x86/include/asm/alternative.h +@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end) + ".pushsection .discard,\"aw\",@progbits\n" \ + DISCARD_ENTRY(1) \ + ".popsection\n" \ +- ".pushsection .altinstr_replacement, \"ax\"\n" \ ++ ".pushsection .altinstr_replacement, \"a\"\n" \ + ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ + ".popsection" + +@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end) + DISCARD_ENTRY(1) \ + DISCARD_ENTRY(2) \ + ".popsection\n" \ +- ".pushsection .altinstr_replacement, \"ax\"\n" \ ++ ".pushsection .altinstr_replacement, \"a\"\n" \ + ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ + ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ + ".popsection" +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h +index 3388034..050f0b9 100644 +--- a/arch/x86/include/asm/apic.h ++++ b/arch/x86/include/asm/apic.h +@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void) + + #ifdef CONFIG_X86_LOCAL_APIC + +-extern unsigned int apic_verbosity; ++extern int apic_verbosity; + extern int local_apic_timer_c2_ok; + + extern int disable_apic; +diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h +index 20370c6..a2eb9b0 100644 +--- a/arch/x86/include/asm/apm.h ++++ b/arch/x86/include/asm/apm.h +@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +- "lcall *%%cs:apm_bios_entry\n\t" ++ "lcall *%%ss:apm_bios_entry\n\t" + "setc %%al\n\t" + "popl %%ebp\n\t" + "popl %%edi\n\t" +@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +- "lcall *%%cs:apm_bios_entry\n\t" ++ "lcall *%%ss:apm_bios_entry\n\t" + "setc %%bl\n\t" + "popl %%ebp\n\t" + "popl %%edi\n\t" +diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h +index 722aa3b..3a0bb27 100644 +--- a/arch/x86/include/asm/atomic.h ++++ b/arch/x86/include/asm/atomic.h +@@ -22,7 +22,18 @@ + */ + static inline int atomic_read(const atomic_t *v) + { +- return (*(volatile int *)&(v)->counter); ++ return (*(volatile const int *)&(v)->counter); ++} ++ ++/** ++ * atomic_read_unchecked - read atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically reads the value of @v. ++ */ ++static inline int atomic_read_unchecked(const atomic_unchecked_t *v) ++{ ++ return (*(volatile const int *)&(v)->counter); + } + + /** +@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i) + } + + /** ++ * atomic_set_unchecked - set atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * @i: required value ++ * ++ * Atomically sets the value of @v to @i. 
++ */ ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) ++{ ++ v->counter = i; ++} ++ ++/** + * atomic_add - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_t +@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i) + */ + static inline void atomic_add(int i, atomic_t *v) + { +- asm volatile(LOCK_PREFIX "addl %1,%0" ++ asm volatile(LOCK_PREFIX "addl %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subl %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter) ++ : "ir" (i)); ++} ++ ++/** ++ * atomic_add_unchecked - add integer to atomic variable ++ * @i: integer value to add ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically adds @i to @v. ++ */ ++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "addl %1,%0\n" + : "+m" (v->counter) + : "ir" (i)); + } +@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v) + */ + static inline void atomic_sub(int i, atomic_t *v) + { +- asm volatile(LOCK_PREFIX "subl %1,%0" ++ asm volatile(LOCK_PREFIX "subl %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addl %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter) ++ : "ir" (i)); ++} ++ ++/** ++ * atomic_sub_unchecked - subtract integer from atomic variable ++ * @i: integer value to subtract ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically subtracts @i from @v. ++ */ ++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "subl %1,%0\n" + : "+m" (v->counter) + : "ir" (i)); + } +@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" ++ asm volatile(LOCK_PREFIX "subl %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addl %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (v->counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) + */ + static inline void atomic_inc(atomic_t *v) + { +- asm volatile(LOCK_PREFIX "incl %0" ++ asm volatile(LOCK_PREFIX "incl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "decl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter)); ++} ++ ++/** ++ * atomic_inc_unchecked - increment atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically increments @v by 1. ++ */ ++static inline void atomic_inc_unchecked(atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "incl %0\n" + : "+m" (v->counter)); + } + +@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v) + */ + static inline void atomic_dec(atomic_t *v) + { +- asm volatile(LOCK_PREFIX "decl %0" ++ asm volatile(LOCK_PREFIX "decl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "incl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter)); ++} ++ ++/** ++ * atomic_dec_unchecked - decrement atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically decrements @v by 1. 
++ */ ++static inline void atomic_dec_unchecked(atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "decl %0\n" + : "+m" (v->counter)); + } + +@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "decl %0; sete %1" ++ asm volatile(LOCK_PREFIX "decl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "incl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "incl %0; sete %1" ++ asm volatile(LOCK_PREFIX "incl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "decl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" ++ : "+m" (v->counter), "=qm" (c) ++ : : "memory"); ++ return c != 0; ++} ++ ++/** ++ * atomic_inc_and_test_unchecked - increment and test ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically increments @v by 1 ++ * and returns true if the result is zero, or false for all ++ * other cases. ++ */ ++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) ++{ ++ unsigned char c; ++ ++ asm volatile(LOCK_PREFIX "incl %0\n" ++ "sete %1\n" + : "+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" ++ asm volatile(LOCK_PREFIX "addl %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subl %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sets %1\n" + : "+m" (v->counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v) + */ + static inline int atomic_add_return(int i, atomic_t *v) + { ++ return i + xadd_check_overflow(&v->counter, i); ++} ++ ++/** ++ * atomic_add_return_unchecked - add integer and return ++ * @i: integer value to add ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically adds @i to @v and returns @i + @v ++ */ ++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) ++{ + return i + xadd(&v->counter, i); + } + +@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v) + } + + #define atomic_inc_return(v) (atomic_add_return(1, v)) ++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_return_unchecked(1, v); ++} + #define atomic_dec_return(v) (atomic_sub_return(1, v)) + + static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) + return cmpxchg(&v->counter, old, new); + } + ++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) ++{ ++ return cmpxchg(&v->counter, old, new); ++} ++ + static inline int atomic_xchg(atomic_t *v, int new) + { + return xchg(&v->counter, new); + } + ++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) ++{ ++ return xchg(&v->counter, new); ++} ++ + /** + * __atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t +@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new) + */ + static inline int __atomic_add_unless(atomic_t *v, int a, int u) + { +- int c, old; ++ int c, old, new; + c = atomic_read(v); + for (;;) { +- 
if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("addl %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "subl %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a)); ++ ++ old = atomic_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; +@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) + } + + /** ++ * atomic_inc_not_zero_hint - increment if not null ++ * @v: pointer of type atomic_t ++ * @hint: probable value of the atomic before the increment ++ * ++ * This version of atomic_inc_not_zero() gives a hint of probable ++ * value of the atomic. This helps processor to not read the memory ++ * before doing the atomic read/modify/write cycle, lowering ++ * number of bus transactions on some arches. ++ * ++ * Returns: 0 if increment was not done, 1 otherwise. ++ */ ++#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint ++static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) ++{ ++ int val, c = hint, new; ++ ++ /* sanity test, should be removed by compiler if hint is a constant */ ++ if (!hint) ++ return __atomic_add_unless(v, 1, 0); ++ ++ do { ++ asm volatile("incl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "decl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=r" (new) ++ : "0" (c)); ++ ++ val = atomic_cmpxchg(v, c, new); ++ if (val == c) ++ return 1; ++ c = val; ++ } while (c); ++ ++ return 0; ++} ++ ++/** + * atomic_inc_short - increment of a short integer + * @v: pointer to type int + * +@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2) + #endif + + /* These are x86-specific, used by some header files */ +-#define atomic_clear_mask(mask, addr) \ +- asm volatile(LOCK_PREFIX "andl %0,%1" \ +- : : "r" (~(mask)), "m" (*(addr)) : "memory") +- +-#define atomic_set_mask(mask, addr) \ +- asm volatile(LOCK_PREFIX "orl %0,%1" \ +- : : "r" ((unsigned)(mask)), "m" (*(addr)) \ +- : "memory") ++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) ++{ ++ asm volatile(LOCK_PREFIX "andl %1,%0" ++ : "+m" (v->counter) ++ : "r" (~(mask)) ++ : "memory"); ++} ++ ++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "andl %1,%0" ++ : "+m" (v->counter) ++ : "r" (~(mask)) ++ : "memory"); ++} ++ ++static inline void atomic_set_mask(unsigned int mask, atomic_t *v) ++{ ++ asm volatile(LOCK_PREFIX "orl %1,%0" ++ : "+m" (v->counter) ++ : "r" (mask) ++ : "memory"); ++} ++ ++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "orl %1,%0" ++ : "+m" (v->counter) ++ : "r" (mask) ++ : "memory"); ++} + + /* Atomic operations are already serializing on x86 */ + #define smp_mb__before_atomic_dec() barrier() +diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h +index b154de7..aadebd8 100644 +--- a/arch/x86/include/asm/atomic64_32.h ++++ b/arch/x86/include/asm/atomic64_32.h +@@ -12,6 +12,14 @@ typedef struct { + u64 __aligned(8) counter; + } atomic64_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ u64 __aligned(8) counter; ++} atomic64_unchecked_t; ++#else ++typedef atomic64_t atomic64_unchecked_t; ++#endif ++ + #define ATOMIC64_INIT(val) { (val) } + + #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...) 
+@@ -37,21 +45,31 @@ typedef struct { + ATOMIC64_DECL_ONE(sym##_386) + + ATOMIC64_DECL_ONE(add_386); ++ATOMIC64_DECL_ONE(add_unchecked_386); + ATOMIC64_DECL_ONE(sub_386); ++ATOMIC64_DECL_ONE(sub_unchecked_386); + ATOMIC64_DECL_ONE(inc_386); ++ATOMIC64_DECL_ONE(inc_unchecked_386); + ATOMIC64_DECL_ONE(dec_386); ++ATOMIC64_DECL_ONE(dec_unchecked_386); + #endif + + #define alternative_atomic64(f, out, in...) \ + __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in) + + ATOMIC64_DECL(read); ++ATOMIC64_DECL(read_unchecked); + ATOMIC64_DECL(set); ++ATOMIC64_DECL(set_unchecked); + ATOMIC64_DECL(xchg); + ATOMIC64_DECL(add_return); ++ATOMIC64_DECL(add_return_unchecked); + ATOMIC64_DECL(sub_return); ++ATOMIC64_DECL(sub_return_unchecked); + ATOMIC64_DECL(inc_return); ++ATOMIC64_DECL(inc_return_unchecked); + ATOMIC64_DECL(dec_return); ++ATOMIC64_DECL(dec_return_unchecked); + ATOMIC64_DECL(dec_if_positive); + ATOMIC64_DECL(inc_not_zero); + ATOMIC64_DECL(add_unless); +@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n + } + + /** ++ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable ++ * @p: pointer to type atomic64_unchecked_t ++ * @o: expected value ++ * @n: new value ++ * ++ * Atomically sets @v to @n if it was equal to @o and returns ++ * the old value. ++ */ ++ ++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n) ++{ ++ return cmpxchg64(&v->counter, o, n); ++} ++ ++/** + * atomic64_xchg - xchg atomic64 variable + * @v: pointer to type atomic64_t + * @n: value to assign +@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i) + } + + /** ++ * atomic64_set_unchecked - set atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * @n: value to assign ++ * ++ * Atomically sets the value of @v to @n. ++ */ ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) ++{ ++ unsigned high = (unsigned)(i >> 32); ++ unsigned low = (unsigned)i; ++ alternative_atomic64(set, /* no output */, ++ "S" (v), "b" (low), "c" (high) ++ : "eax", "edx", "memory"); ++} ++ ++/** + * atomic64_read - read atomic64 variable + * @v: pointer to type atomic64_t + * +@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v) + } + + /** ++ * atomic64_read_unchecked - read atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically reads the value of @v and returns it. 
++ */ ++static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v) ++{ ++ long long r; ++ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory"); ++ return r; ++ } ++ ++/** + * atomic64_add_return - add and return + * @i: integer value to add + * @v: pointer to type atomic64_t +@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v) + return i; + } + ++/** ++ * atomic64_add_return_unchecked - add and return ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v and returns @i + *@v ++ */ ++static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v) ++{ ++ alternative_atomic64(add_return_unchecked, ++ ASM_OUTPUT2("+A" (i), "+c" (v)), ++ ASM_NO_INPUT_CLOBBER("memory")); ++ return i; ++} ++ + /* + * Other variants with different arithmetic operators: + */ +@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v) + return a; + } + ++static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) ++{ ++ long long a; ++ alternative_atomic64(inc_return_unchecked, "=&A" (a), ++ "S" (v) : "memory", "ecx"); ++ return a; ++} ++ + static inline long long atomic64_dec_return(atomic64_t *v) + { + long long a; +@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v) + } + + /** ++ * atomic64_add_unchecked - add integer to atomic64 variable ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v. ++ */ ++static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v) ++{ ++ __alternative_atomic64(add_unchecked, add_return_unchecked, ++ ASM_OUTPUT2("+A" (i), "+c" (v)), ++ ASM_NO_INPUT_CLOBBER("memory")); ++ return i; ++} ++ ++/** + * atomic64_sub - subtract the atomic64 variable + * @i: integer value to subtract + * @v: pointer to type atomic64_t +diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h +index 0e1cbfc..5623683 100644 +--- a/arch/x86/include/asm/atomic64_64.h ++++ b/arch/x86/include/asm/atomic64_64.h +@@ -18,7 +18,19 @@ + */ + static inline long atomic64_read(const atomic64_t *v) + { +- return (*(volatile long *)&(v)->counter); ++ return (*(volatile const long *)&(v)->counter); ++} ++ ++/** ++ * atomic64_read_unchecked - read atomic64 variable ++ * @v: pointer of type atomic64_unchecked_t ++ * ++ * Atomically reads the value of @v. ++ * Doesn't imply a read memory barrier. ++ */ ++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) ++{ ++ return (*(volatile const long *)&(v)->counter); + } + + /** +@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i) + } + + /** ++ * atomic64_set_unchecked - set atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * @i: required value ++ * ++ * Atomically sets the value of @v to @i. 
++ */ ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) ++{ ++ v->counter = i; ++} ++ ++/** + * atomic64_add - add integer to atomic64 variable + * @i: integer value to add + * @v: pointer to type atomic64_t +@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i) + */ + static inline void atomic64_add(long i, atomic64_t *v) + { ++ asm volatile(LOCK_PREFIX "addq %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subq %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "er" (i), "m" (v->counter)); ++} ++ ++/** ++ * atomic64_add_unchecked - add integer to atomic64 variable ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v. ++ */ ++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v) ++{ + asm volatile(LOCK_PREFIX "addq %1,%0" + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); +@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v) + */ + static inline void atomic64_sub(long i, atomic64_t *v) + { +- asm volatile(LOCK_PREFIX "subq %1,%0" ++ asm volatile(LOCK_PREFIX "subq %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addq %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "er" (i), "m" (v->counter)); ++} ++ ++/** ++ * atomic64_sub_unchecked - subtract the atomic64 variable ++ * @i: integer value to subtract ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically subtracts @i from @v. ++ */ ++static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "subq %1,%0\n" + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); + } +@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" ++ asm volatile(LOCK_PREFIX "subq %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addq %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "=m" (v->counter), "=qm" (c) + : "er" (i), "m" (v->counter) : "memory"); + return c; +@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) + */ + static inline void atomic64_inc(atomic64_t *v) + { ++ asm volatile(LOCK_PREFIX "incq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "decq %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "m" (v->counter)); ++} ++ ++/** ++ * atomic64_inc_unchecked - increment atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically increments @v by 1. ++ */ ++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) ++{ + asm volatile(LOCK_PREFIX "incq %0" + : "=m" (v->counter) + : "m" (v->counter)); +@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v) + */ + static inline void atomic64_dec(atomic64_t *v) + { +- asm volatile(LOCK_PREFIX "decq %0" ++ asm volatile(LOCK_PREFIX "decq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "incq %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "m" (v->counter)); ++} ++ ++/** ++ * atomic64_dec_unchecked - decrement atomic64 variable ++ * @v: pointer to type atomic64_t ++ * ++ * Atomically decrements @v by 1. 
++ */ ++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "decq %0\n" + : "=m" (v->counter) + : "m" (v->counter)); + } +@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "decq %0; sete %1" ++ asm volatile(LOCK_PREFIX "decq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "incq %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); + return c != 0; +@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "incq %0; sete %1" ++ asm volatile(LOCK_PREFIX "incq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "decq %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); + return c != 0; +@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" ++ asm volatile(LOCK_PREFIX "addq %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subq %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sets %1\n" + : "=m" (v->counter), "=qm" (c) + : "er" (i), "m" (v->counter) : "memory"); + return c; +@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) + */ + static inline long atomic64_add_return(long i, atomic64_t *v) + { ++ return i + xadd_check_overflow(&v->counter, i); ++} ++ ++/** ++ * atomic64_add_return_unchecked - add and return ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v and returns @i + @v ++ */ ++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) ++{ + return i + xadd(&v->counter, i); + } + +@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) + } + + #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) ++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) ++{ ++ return atomic64_add_return_unchecked(1, v); ++} + #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) + + static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) +@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) + return cmpxchg(&v->counter, old, new); + } + ++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new) ++{ ++ return cmpxchg(&v->counter, old, new); ++} ++ + static inline long atomic64_xchg(atomic64_t *v, long new) + { + return xchg(&v->counter, new); +@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new) + */ + static inline int atomic64_add_unless(atomic64_t *v, long a, long u) + { +- long c, old; ++ long c, old, new; + c = atomic64_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic64_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("add %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "sub %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a)); ++ ++ old = atomic64_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; + } +- return c != (u); ++ return c != u; + } + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +diff --git a/arch/x86/include/asm/bitops.h 
b/arch/x86/include/asm/bitops.h +index 6dfd019..28e188d 100644 +--- a/arch/x86/include/asm/bitops.h ++++ b/arch/x86/include/asm/bitops.h +@@ -40,7 +40,7 @@ + * a mask operation on a byte. + */ + #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) +-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) ++#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3)) + #define CONST_MASK(nr) (1 << ((nr) & 7)) + + /** +@@ -486,7 +486,7 @@ static inline int fls(int x) + * at position 64. + */ + #ifdef CONFIG_X86_64 +-static __always_inline int fls64(__u64 x) ++static __always_inline long fls64(__u64 x) + { + int bitpos = -1; + /* +diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h +index 4fa687a..60f2d39 100644 +--- a/arch/x86/include/asm/boot.h ++++ b/arch/x86/include/asm/boot.h +@@ -6,10 +6,15 @@ + #include <uapi/asm/boot.h> + + /* Physical address where kernel should be loaded. */ +-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ ++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ + + (CONFIG_PHYSICAL_ALIGN - 1)) \ + & ~(CONFIG_PHYSICAL_ALIGN - 1)) + ++#ifndef __ASSEMBLY__ ++extern unsigned char __LOAD_PHYSICAL_ADDR[]; ++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR) ++#endif ++ + /* Minimum kernel alignment, as a power of two */ + #ifdef CONFIG_X86_64 + #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT +diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h +index 48f99f1..d78ebf9 100644 +--- a/arch/x86/include/asm/cache.h ++++ b/arch/x86/include/asm/cache.h +@@ -5,12 +5,13 @@ + + /* L1 cache line size */ + #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) ++#define __read_only __attribute__((__section__(".data..read_only"))) + + #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT +-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) ++#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT) + + #ifdef CONFIG_X86_VSMP + #ifdef CONFIG_SMP +diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h +index 9863ee3..4a1f8e1 100644 +--- a/arch/x86/include/asm/cacheflush.h ++++ b/arch/x86/include/asm/cacheflush.h +@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg) + unsigned long pg_flags = pg->flags & _PGMT_MASK; + + if (pg_flags == _PGMT_DEFAULT) +- return -1; ++ return ~0UL; + else if (pg_flags == _PGMT_WC) + return _PAGE_CACHE_WC; + else if (pg_flags == _PGMT_UC_MINUS) +diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h +index 46fc474..b02b0f9 100644 +--- a/arch/x86/include/asm/checksum_32.h ++++ b/arch/x86/include/asm/checksum_32.h +@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); + ++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst, ++ int len, __wsum sum, ++ int *src_err_ptr, int *dst_err_ptr); ++ ++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst, ++ int len, __wsum sum, ++ int *src_err_ptr, int *dst_err_ptr); ++ + /* + * Note: when you get a NULL pointer exception here this means someone + * passed in an incorrect kernel address to one of these functions. 
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src, + int *err_ptr) + { + might_sleep(); +- return csum_partial_copy_generic((__force void *)src, dst, ++ return csum_partial_copy_generic_from_user((__force void *)src, dst, + len, sum, err_ptr, NULL); + } + +@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src, + { + might_sleep(); + if (access_ok(VERIFY_WRITE, dst, len)) +- return csum_partial_copy_generic(src, (__force void *)dst, ++ return csum_partial_copy_generic_to_user(src, (__force void *)dst, + len, sum, NULL, err_ptr); + + if (len) +diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h +index 8d871ea..c1a0dc9 100644 +--- a/arch/x86/include/asm/cmpxchg.h ++++ b/arch/x86/include/asm/cmpxchg.h +@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void) + __compiletime_error("Bad argument size for cmpxchg"); + extern void __xadd_wrong_size(void) + __compiletime_error("Bad argument size for xadd"); ++extern void __xadd_check_overflow_wrong_size(void) ++ __compiletime_error("Bad argument size for xadd_check_overflow"); + extern void __add_wrong_size(void) + __compiletime_error("Bad argument size for add"); ++extern void __add_check_overflow_wrong_size(void) ++ __compiletime_error("Bad argument size for add_check_overflow"); + + /* + * Constants for operation sizes. On 32-bit, the 64-bit size it set to +@@ -67,6 +71,34 @@ extern void __add_wrong_size(void) + __ret; \ + }) + ++#define __xchg_op_check_overflow(ptr, arg, op, lock) \ ++ ({ \ ++ __typeof__ (*(ptr)) __ret = (arg); \ ++ switch (sizeof(*(ptr))) { \ ++ case __X86_CASE_L: \ ++ asm volatile (lock #op "l %0, %1\n" \ ++ "jno 0f\n" \ ++ "mov %0,%1\n" \ ++ "int $4\n0:\n" \ ++ _ASM_EXTABLE(0b, 0b) \ ++ : "+r" (__ret), "+m" (*(ptr)) \ ++ : : "memory", "cc"); \ ++ break; \ ++ case __X86_CASE_Q: \ ++ asm volatile (lock #op "q %q0, %1\n" \ ++ "jno 0f\n" \ ++ "mov %0,%1\n" \ ++ "int $4\n0:\n" \ ++ _ASM_EXTABLE(0b, 0b) \ ++ : "+r" (__ret), "+m" (*(ptr)) \ ++ : : "memory", "cc"); \ ++ break; \ ++ default: \ ++ __ ## op ## _check_overflow_wrong_size(); \ ++ } \ ++ __ret; \ ++ }) ++ + /* + * Note: no "lock" prefix even on SMP: xchg always implies lock anyway. 
+ * Since this is generally used to protect other memory information, we +@@ -167,6 +199,9 @@ extern void __add_wrong_size(void) + #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ") + #define xadd_local(ptr, inc) __xadd((ptr), (inc), "") + ++#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock) ++#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX) ++ + #define __add(ptr, inc, lock) \ + ({ \ + __typeof__ (*(ptr)) __ret = (inc); \ +diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h +index 59c6c40..5e0b22c 100644 +--- a/arch/x86/include/asm/compat.h ++++ b/arch/x86/include/asm/compat.h +@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64; + typedef u32 compat_uint_t; + typedef u32 compat_ulong_t; + typedef u64 __attribute__((aligned(4))) compat_u64; +-typedef u32 compat_uptr_t; ++typedef u32 __user compat_uptr_t; + + struct compat_timespec { + compat_time_t tv_sec; +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h +index 93fe929..90858b7 100644 +--- a/arch/x86/include/asm/cpufeature.h ++++ b/arch/x86/include/asm/cpufeature.h +@@ -207,7 +207,7 @@ + #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */ + #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */ + #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */ +-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */ ++#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */ + #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */ + #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ + #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ +@@ -377,7 +377,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) + ".section .discard,\"aw\",@progbits\n" + " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ + ".previous\n" +- ".section .altinstr_replacement,\"ax\"\n" ++ ".section .altinstr_replacement,\"a\"\n" + "3: movb $1,%0\n" + "4:\n" + ".previous\n" +diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h +index 8bf1c06..b6ae785 100644 +--- a/arch/x86/include/asm/desc.h ++++ b/arch/x86/include/asm/desc.h +@@ -4,6 +4,7 @@ + #include <asm/desc_defs.h> + #include <asm/ldt.h> + #include <asm/mmu.h> ++#include <asm/pgtable.h> + + #include <linux/smp.h> + #include <linux/percpu.h> +@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in + + desc->type = (info->read_exec_only ^ 1) << 1; + desc->type |= info->contents << 2; ++ desc->type |= info->seg_not_present ^ 1; + + desc->s = 1; + desc->dpl = 0x3; +@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in + } + + extern struct desc_ptr idt_descr; +-extern gate_desc idt_table[]; + extern struct desc_ptr nmi_idt_descr; +-extern gate_desc nmi_idt_table[]; +- +-struct gdt_page { +- struct desc_struct gdt[GDT_ENTRIES]; +-} __attribute__((aligned(PAGE_SIZE))); +- +-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); ++extern gate_desc idt_table[256]; ++extern gate_desc nmi_idt_table[256]; + ++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)]; + static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) + { +- return per_cpu(gdt_page, cpu).gdt; ++ return cpu_gdt_table[cpu]; + } + + #ifdef CONFIG_X86_64 +@@ -72,8 +69,14 @@ static inline void 
pack_gate(gate_desc *gate, unsigned char type, + unsigned long base, unsigned dpl, unsigned flags, + unsigned short seg) + { +- gate->a = (seg << 16) | (base & 0xffff); +- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8); ++ gate->gate.offset_low = base; ++ gate->gate.seg = seg; ++ gate->gate.reserved = 0; ++ gate->gate.type = type; ++ gate->gate.s = 0; ++ gate->gate.dpl = dpl; ++ gate->gate.p = 1; ++ gate->gate.offset_high = base >> 16; + } + + #endif +@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) + + static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate) + { ++ pax_open_kernel(); + memcpy(&idt[entry], gate, sizeof(*gate)); ++ pax_close_kernel(); + } + + static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) + { ++ pax_open_kernel(); + memcpy(&ldt[entry], desc, 8); ++ pax_close_kernel(); + } + + static inline void +@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int + default: size = sizeof(*gdt); break; + } + ++ pax_open_kernel(); + memcpy(&gdt[entry], desc, size); ++ pax_close_kernel(); + } + + static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, +@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries) + + static inline void native_load_tr_desc(void) + { ++ pax_open_kernel(); + asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); ++ pax_close_kernel(); + } + + static inline void native_load_gdt(const struct desc_ptr *dtr) +@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) + struct desc_struct *gdt = get_cpu_gdt_table(cpu); + unsigned int i; + ++ pax_open_kernel(); + for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) + gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; ++ pax_close_kernel(); + } + + #define _LDT_empty(info) \ +@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc) + preempt_enable(); + } + +-static inline unsigned long get_desc_base(const struct desc_struct *desc) ++static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc) + { + return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); + } +@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) + } + + #ifdef CONFIG_X86_64 +-static inline void set_nmi_gate(int gate, void *addr) ++static inline void set_nmi_gate(int gate, const void *addr) + { + gate_desc s; + +@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr) + } + #endif + +-static inline void _set_gate(int gate, unsigned type, void *addr, ++static inline void _set_gate(int gate, unsigned type, const void *addr, + unsigned dpl, unsigned ist, unsigned seg) + { + gate_desc s; +@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr, + * Pentium F0 0F bugfix can have resulted in the mapped + * IDT being write-protected. + */ +-static inline void set_intr_gate(unsigned int n, void *addr) ++static inline void set_intr_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); +@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr) + /* + * This routine sets up an interrupt gate at directory privilege level 3. 
+ */ +-static inline void set_system_intr_gate(unsigned int n, void *addr) ++static inline void set_system_intr_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS); + } + +-static inline void set_system_trap_gate(unsigned int n, void *addr) ++static inline void set_system_trap_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS); + } + +-static inline void set_trap_gate(unsigned int n, void *addr) ++static inline void set_trap_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS); +@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr) + static inline void set_task_gate(unsigned int n, unsigned int gdt_entry) + { + BUG_ON((unsigned)n > 0xFF); +- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3)); ++ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3)); + } + +-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist) ++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS); + } + +-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist) ++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS); + } + ++#ifdef CONFIG_X86_32 ++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu) ++{ ++ struct desc_struct d; ++ ++ if (likely(limit)) ++ limit = (limit - 1UL) >> PAGE_SHIFT; ++ pack_descriptor(&d, base, limit, 0xFB, 0xC); ++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S); ++} ++#endif ++ + #endif /* _ASM_X86_DESC_H */ +diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h +index 278441f..b95a174 100644 +--- a/arch/x86/include/asm/desc_defs.h ++++ b/arch/x86/include/asm/desc_defs.h +@@ -31,6 +31,12 @@ struct desc_struct { + unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; + unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; + }; ++ struct { ++ u16 offset_low; ++ u16 seg; ++ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1; ++ unsigned offset_high: 16; ++ } gate; + }; + } __attribute__((packed)); + +diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h +index ced283a..ffe04cc 100644 +--- a/arch/x86/include/asm/div64.h ++++ b/arch/x86/include/asm/div64.h +@@ -39,7 +39,7 @@ + __mod; \ + }) + +-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) ++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) + { + union { + u64 v64; +diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h +index 9c999c1..3860cb8 100644 +--- a/arch/x86/include/asm/elf.h ++++ b/arch/x86/include/asm/elf.h +@@ -243,7 +243,25 @@ extern int force_personality32; + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + ++#ifdef CONFIG_PAX_SEGMEXEC ++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? 
SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2) ++#else + #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++#ifdef CONFIG_X86_32 ++#define PAX_ELF_ET_DYN_BASE 0x10000000UL ++ ++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) ++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) ++#else ++#define PAX_ELF_ET_DYN_BASE 0x400000UL ++ ++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) ++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) ++#endif ++#endif + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. This could be done in user space, +@@ -296,16 +314,12 @@ do { \ + + #define ARCH_DLINFO \ + do { \ +- if (vdso_enabled) \ +- NEW_AUX_ENT(AT_SYSINFO_EHDR, \ +- (unsigned long)current->mm->context.vdso); \ ++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \ + } while (0) + + #define ARCH_DLINFO_X32 \ + do { \ +- if (vdso_enabled) \ +- NEW_AUX_ENT(AT_SYSINFO_EHDR, \ +- (unsigned long)current->mm->context.vdso); \ ++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \ + } while (0) + + #define AT_SYSINFO 32 +@@ -320,7 +334,7 @@ else \ + + #endif /* !CONFIG_X86_32 */ + +-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) ++#define VDSO_CURRENT_BASE (current->mm->context.vdso) + + #define VDSO_ENTRY \ + ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) +@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm, + extern int syscall32_setup_pages(struct linux_binprm *, int exstack); + #define compat_arch_setup_additional_pages syscall32_setup_pages + +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + /* + * True on X86_32 or when emulating IA32 on X86_64 + */ +diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h +index 75ce3f4..882e801 100644 +--- a/arch/x86/include/asm/emergency-restart.h ++++ b/arch/x86/include/asm/emergency-restart.h +@@ -13,6 +13,6 @@ enum reboot_type { + + extern enum reboot_type reboot_type; + +-extern void machine_emergency_restart(void); ++extern void machine_emergency_restart(void) __noreturn; + + #endif /* _ASM_X86_EMERGENCY_RESTART_H */ +diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h +index e25cc33..425d099 100644 +--- a/arch/x86/include/asm/fpu-internal.h ++++ b/arch/x86/include/asm/fpu-internal.h +@@ -127,7 +127,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk) + ({ \ + int err; \ + asm volatile(ASM_STAC "\n" \ +- "1:" #insn "\n\t" \ ++ "1:" \ ++ __copyuser_seg \ ++ #insn "\n\t" \ + "2: " ASM_CLAC "\n" \ + ".section .fixup,\"ax\"\n" \ + "3: movl $-1,%[err]\n" \ +@@ -300,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk) + "emms\n\t" /* clear stack tags */ + "fildl %P[addr]", /* set F?P to defined value */ + X86_FEATURE_FXSAVE_LEAK, +- [addr] "m" (tsk->thread.fpu.has_fpu)); ++ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0)); + + return fpu_restore_checking(&tsk->thread.fpu); + } +diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h +index be27ba1..8f13ff9 100644 +--- a/arch/x86/include/asm/futex.h ++++ b/arch/x86/include/asm/futex.h +@@ -12,6 +12,7 @@ + #include <asm/smap.h> + + #define __futex_atomic_op1(insn, ret, oldval, uaddr, 
oparg) \ ++ typecheck(u32 __user *, uaddr); \ + asm volatile("\t" ASM_STAC "\n" \ + "1:\t" insn "\n" \ + "2:\t" ASM_CLAC "\n" \ +@@ -20,15 +21,16 @@ + "\tjmp\t2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ +- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ ++ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \ + : "i" (-EFAULT), "0" (oparg), "1" (0)) + + #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ ++ typecheck(u32 __user *, uaddr); \ + asm volatile("\t" ASM_STAC "\n" \ + "1:\tmovl %2, %0\n" \ + "\tmovl\t%0, %3\n" \ + "\t" insn "\n" \ +- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \ ++ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \ + "\tjnz\t1b\n" \ + "3:\t" ASM_CLAC "\n" \ + "\t.section .fixup,\"ax\"\n" \ +@@ -38,7 +40,7 @@ + _ASM_EXTABLE(1b, 4b) \ + _ASM_EXTABLE(2b, 4b) \ + : "=&a" (oldval), "=&r" (ret), \ +- "+m" (*uaddr), "=&r" (tem) \ ++ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \ + : "r" (oparg), "i" (-EFAULT), "1" (0)) + + static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +@@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) + + switch (op) { + case FUTEX_OP_SET: +- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); ++ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: +- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, ++ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval, + uaddr, oparg); + break; + case FUTEX_OP_OR: +@@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + return -EFAULT; + + asm volatile("\t" ASM_STAC "\n" +- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" ++ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n" + "2:\t" ASM_CLAC "\n" + "\t.section .fixup, \"ax\"\n" + "3:\tmov %3, %0\n" + "\tjmp 2b\n" + "\t.previous\n" + _ASM_EXTABLE(1b, 3b) +- : "+r" (ret), "=a" (oldval), "+m" (*uaddr) ++ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr)) + : "i" (-EFAULT), "r" (newval), "1" (oldval) + : "memory" + ); +diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h +index 10a78c3..cc77143 100644 +--- a/arch/x86/include/asm/hw_irq.h ++++ b/arch/x86/include/asm/hw_irq.h +@@ -147,8 +147,8 @@ extern void setup_ioapic_dest(void); + extern void enable_IO_APIC(void); + + /* Statistics */ +-extern atomic_t irq_err_count; +-extern atomic_t irq_mis_count; ++extern atomic_unchecked_t irq_err_count; ++extern atomic_unchecked_t irq_mis_count; + + /* EISA */ + extern void eisa_set_level_irq(unsigned int irq); +diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h +index a203659..9889f1c 100644 +--- a/arch/x86/include/asm/i8259.h ++++ b/arch/x86/include/asm/i8259.h +@@ -62,7 +62,7 @@ struct legacy_pic { + void (*init)(int auto_eoi); + int (*irq_pending)(unsigned int irq); + void (*make_irq)(unsigned int irq); +-}; ++} __do_const; + + extern struct legacy_pic *legacy_pic; + extern struct legacy_pic null_legacy_pic; +diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h +index d8e8eef..1765f78 100644 +--- a/arch/x86/include/asm/io.h ++++ b/arch/x86/include/asm/io.h +@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \ + "m" (*(volatile type __force *)addr) barrier); } + + build_mmio_read(readb, "b", unsigned char, "=q", :"memory") +-build_mmio_read(readw, "w", unsigned short, "=r", :"memory") +-build_mmio_read(readl, "l", unsigned int, "=r", 
:"memory") ++build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory") ++build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory") + + build_mmio_read(__readb, "b", unsigned char, "=q", ) +-build_mmio_read(__readw, "w", unsigned short, "=r", ) +-build_mmio_read(__readl, "l", unsigned int, "=r", ) ++build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", ) ++build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", ) + + build_mmio_write(writeb, "b", unsigned char, "q", :"memory") + build_mmio_write(writew, "w", unsigned short, "r", :"memory") +@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) + return ioremap_nocache(offset, size); + } + +-extern void iounmap(volatile void __iomem *addr); ++extern void iounmap(const volatile void __iomem *addr); + + extern void set_iounmap_nonlazy(void); + +@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void); + + #include <linux/vmalloc.h> + ++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE ++static inline int valid_phys_addr_range(unsigned long addr, size_t count) ++{ ++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0; ++} ++ ++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count) ++{ ++ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0; ++} ++ + /* + * Convert a virtual cached pointer to an uncached pointer + */ +diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h +index bba3cf8..06bc8da 100644 +--- a/arch/x86/include/asm/irqflags.h ++++ b/arch/x86/include/asm/irqflags.h +@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void) + sti; \ + sysexit + ++#define GET_CR0_INTO_RDI mov %cr0, %rdi ++#define SET_RDI_INTO_CR0 mov %rdi, %cr0 ++#define GET_CR3_INTO_RDI mov %cr3, %rdi ++#define SET_RDI_INTO_CR3 mov %rdi, %cr3 ++ + #else + #define INTERRUPT_RETURN iret + #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit +diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h +index 5a6d287..f815789 100644 +--- a/arch/x86/include/asm/kprobes.h ++++ b/arch/x86/include/asm/kprobes.h +@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t; + #define RELATIVEJUMP_SIZE 5 + #define RELATIVECALL_OPCODE 0xe8 + #define RELATIVE_ADDR_SIZE 4 +-#define MAX_STACK_SIZE 64 +-#define MIN_STACK_SIZE(ADDR) \ +- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ +- THREAD_SIZE - (unsigned long)(ADDR))) \ +- ? 
(MAX_STACK_SIZE) \ +- : (((unsigned long)current_thread_info()) + \ +- THREAD_SIZE - (unsigned long)(ADDR))) ++#define MAX_STACK_SIZE 64UL ++#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR)) + + #define flush_insn_slot(p) do { } while (0) + +diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h +index 2d89e39..baee879 100644 +--- a/arch/x86/include/asm/local.h ++++ b/arch/x86/include/asm/local.h +@@ -10,33 +10,97 @@ typedef struct { + atomic_long_t a; + } local_t; + ++typedef struct { ++ atomic_long_unchecked_t a; ++} local_unchecked_t; ++ + #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } + + #define local_read(l) atomic_long_read(&(l)->a) ++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a) + #define local_set(l, i) atomic_long_set(&(l)->a, (i)) ++#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i)) + + static inline void local_inc(local_t *l) + { +- asm volatile(_ASM_INC "%0" ++ asm volatile(_ASM_INC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_DEC "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (l->a.counter)); ++} ++ ++static inline void local_inc_unchecked(local_unchecked_t *l) ++{ ++ asm volatile(_ASM_INC "%0\n" + : "+m" (l->a.counter)); + } + + static inline void local_dec(local_t *l) + { +- asm volatile(_ASM_DEC "%0" ++ asm volatile(_ASM_DEC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_INC "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (l->a.counter)); ++} ++ ++static inline void local_dec_unchecked(local_unchecked_t *l) ++{ ++ asm volatile(_ASM_DEC "%0\n" + : "+m" (l->a.counter)); + } + + static inline void local_add(long i, local_t *l) + { +- asm volatile(_ASM_ADD "%1,%0" ++ asm volatile(_ASM_ADD "%1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_SUB "%1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (l->a.counter) ++ : "ir" (i)); ++} ++ ++static inline void local_add_unchecked(long i, local_unchecked_t *l) ++{ ++ asm volatile(_ASM_ADD "%1,%0\n" + : "+m" (l->a.counter) + : "ir" (i)); + } + + static inline void local_sub(long i, local_t *l) + { +- asm volatile(_ASM_SUB "%1,%0" ++ asm volatile(_ASM_SUB "%1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_ADD "%1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (l->a.counter) ++ : "ir" (i)); ++} ++ ++static inline void local_sub_unchecked(long i, local_unchecked_t *l) ++{ ++ asm volatile(_ASM_SUB "%1,%0\n" + : "+m" (l->a.counter) + : "ir" (i)); + } +@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l) + { + unsigned char c; + +- asm volatile(_ASM_SUB "%2,%0; sete %1" ++ asm volatile(_ASM_SUB "%2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_ADD "%2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (l->a.counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l) + { + unsigned char c; + +- asm volatile(_ASM_DEC "%0; sete %1" ++ asm volatile(_ASM_DEC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_INC "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (l->a.counter), "=qm" (c) + : : "memory"); + return c != 0; +@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l) + { + unsigned char c; + +- asm volatile(_ASM_INC "%0; sete %1" ++ asm volatile(_ASM_INC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ 
"jno 0f\n" ++ _ASM_DEC "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (l->a.counter), "=qm" (c) + : : "memory"); + return c != 0; +@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l) + { + unsigned char c; + +- asm volatile(_ASM_ADD "%2,%0; sets %1" ++ asm volatile(_ASM_ADD "%2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_SUB "%2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sets %1\n" + : "+m" (l->a.counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l) + static inline long local_add_return(long i, local_t *l) + { + long __i = i; ++ asm volatile(_ASM_XADD "%0, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_MOV "%0,%1\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+r" (i), "+m" (l->a.counter) ++ : : "memory"); ++ return i + __i; ++} ++ ++/** ++ * local_add_return_unchecked - add and return ++ * @i: integer value to add ++ * @l: pointer to type local_unchecked_t ++ * ++ * Atomically adds @i to @l and returns @i + @l ++ */ ++static inline long local_add_return_unchecked(long i, local_unchecked_t *l) ++{ ++ long __i = i; + asm volatile(_ASM_XADD "%0, %1;" + : "+r" (i), "+m" (l->a.counter) + : : "memory"); +@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l) + + #define local_cmpxchg(l, o, n) \ + (cmpxchg_local(&((l)->a.counter), (o), (n))) ++#define local_cmpxchg_unchecked(l, o, n) \ ++ (cmpxchg_local(&((l)->a.counter), (o), (n))) + /* Always has a lock prefix */ + #define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) + +diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h +new file mode 100644 +index 0000000..2bfd3ba +--- /dev/null ++++ b/arch/x86/include/asm/mman.h +@@ -0,0 +1,15 @@ ++#ifndef _X86_MMAN_H ++#define _X86_MMAN_H ++ ++#include <uapi/asm/mman.h> ++ ++#ifdef __KERNEL__ ++#ifndef __ASSEMBLY__ ++#ifdef CONFIG_X86_32 ++#define arch_mmap_check i386_mmap_check ++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags); ++#endif ++#endif ++#endif ++ ++#endif /* X86_MMAN_H */ +diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h +index 5f55e69..e20bfb1 100644 +--- a/arch/x86/include/asm/mmu.h ++++ b/arch/x86/include/asm/mmu.h +@@ -9,7 +9,7 @@ + * we put the segment information here. 
+ */ + typedef struct { +- void *ldt; ++ struct desc_struct *ldt; + int size; + + #ifdef CONFIG_X86_64 +@@ -18,7 +18,19 @@ typedef struct { + #endif + + struct mutex lock; +- void *vdso; ++ unsigned long vdso; ++ ++#ifdef CONFIG_X86_32 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ unsigned long user_cs_base; ++ unsigned long user_cs_limit; ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) ++ cpumask_t cpu_user_cs_mask; ++#endif ++ ++#endif ++#endif + } mm_context_t; + + #ifdef CONFIG_SMP +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h +index cdbf367..adb37ac 100644 +--- a/arch/x86/include/asm/mmu_context.h ++++ b/arch/x86/include/asm/mmu_context.h +@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm); + + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) + { ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ unsigned int i; ++ pgd_t *pgd; ++ ++ pax_open_kernel(); ++ pgd = get_cpu_pgd(smp_processor_id()); ++ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i) ++ set_pgd_batched(pgd+i, native_make_pgd(0)); ++ pax_close_kernel(); ++#endif ++ + #ifdef CONFIG_SMP + if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) + this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); +@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) + { + unsigned cpu = smp_processor_id(); ++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++ int tlbstate = TLBSTATE_OK; ++#endif + + if (likely(prev != next)) { + #ifdef CONFIG_SMP ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++ tlbstate = this_cpu_read(cpu_tlbstate.state); ++#endif + this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); + this_cpu_write(cpu_tlbstate.active_mm, next); + #endif + cpumask_set_cpu(cpu, mm_cpumask(next)); + + /* Re-load page tables */ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ pax_open_kernel(); ++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd); ++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd); ++ pax_close_kernel(); ++ load_cr3(get_cpu_pgd(cpu)); ++#else + load_cr3(next->pgd); ++#endif + + /* stop flush ipis for the previous mm */ + cpumask_clear_cpu(cpu, mm_cpumask(prev)); +@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + */ + if (unlikely(prev->context.ldt != next->context.ldt)) + load_LDT_nolock(&next->context); +- } ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) ++ if (!(__supported_pte_mask & _PAGE_NX)) { ++ smp_mb__before_clear_bit(); ++ cpu_clear(cpu, prev->context.cpu_user_cs_mask); ++ smp_mb__after_clear_bit(); ++ cpu_set(cpu, next->context.cpu_user_cs_mask); ++ } ++#endif ++ ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base || ++ prev->context.user_cs_limit != next->context.user_cs_limit)) ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); + #ifdef CONFIG_SMP ++ else if (unlikely(tlbstate != TLBSTATE_OK)) ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); ++#endif ++#endif ++ ++ } + else { ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ pax_open_kernel(); ++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd); ++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, 
next->pgd); ++ pax_close_kernel(); ++ load_cr3(get_cpu_pgd(cpu)); ++#endif ++ ++#ifdef CONFIG_SMP + this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); + BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next); + +@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + * tlb flush IPI delivery. We must reload CR3 + * to make sure to use no freed page tables. + */ ++ ++#ifndef CONFIG_PAX_PER_CPU_PGD + load_cr3(next->pgd); ++#endif ++ + load_LDT_nolock(&next->context); ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) ++ if (!(__supported_pte_mask & _PAGE_NX)) ++ cpu_set(cpu, next->context.cpu_user_cs_mask); ++#endif ++ ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX))) ++#endif ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); ++#endif ++ + } +- } + #endif ++ } + } + + #define activate_mm(prev, next) \ +diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h +index e3b7819..b257c64 100644 +--- a/arch/x86/include/asm/module.h ++++ b/arch/x86/include/asm/module.h +@@ -5,6 +5,7 @@ + + #ifdef CONFIG_X86_64 + /* X86_64 does not define MODULE_PROC_FAMILY */ ++#define MODULE_PROC_FAMILY "" + #elif defined CONFIG_M486 + #define MODULE_PROC_FAMILY "486 " + #elif defined CONFIG_M586 +@@ -57,8 +58,20 @@ + #error unknown processor family + #endif + +-#ifdef CONFIG_X86_32 +-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS ++#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS " ++#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR) ++#define MODULE_PAX_KERNEXEC "KERNEXEC_OR " ++#else ++#define MODULE_PAX_KERNEXEC "" + #endif + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++#define MODULE_PAX_UDEREF "UDEREF " ++#else ++#define MODULE_PAX_UDEREF "" ++#endif ++ ++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF ++ + #endif /* _ASM_X86_MODULE_H */ +diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h +index c0fa356..07a498a 100644 +--- a/arch/x86/include/asm/nmi.h ++++ b/arch/x86/include/asm/nmi.h +@@ -42,11 +42,11 @@ struct nmiaction { + nmi_handler_t handler; + unsigned long flags; + const char *name; +-}; ++} __do_const; + + #define register_nmi_handler(t, fn, fg, n, init...) 
\ + ({ \ +- static struct nmiaction init fn##_na = { \ ++ static const struct nmiaction init fn##_na = { \ + .handler = (fn), \ + .name = (n), \ + .flags = (fg), \ +@@ -54,7 +54,7 @@ struct nmiaction { + __register_nmi_handler((t), &fn##_na); \ + }) + +-int __register_nmi_handler(unsigned int, struct nmiaction *); ++int __register_nmi_handler(unsigned int, const struct nmiaction *); + + void unregister_nmi_handler(unsigned int, const char *); + +diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h +index c878924..21f4889 100644 +--- a/arch/x86/include/asm/page.h ++++ b/arch/x86/include/asm/page.h +@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr, + __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x))) + + #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) ++#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base)) + + #define __boot_va(x) __va(x) + #define __boot_pa(x) __pa(x) +diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h +index 0f1ddee..e2fc3d1 100644 +--- a/arch/x86/include/asm/page_64.h ++++ b/arch/x86/include/asm/page_64.h +@@ -7,9 +7,9 @@ + + /* duplicated to the one in bootmem.h */ + extern unsigned long max_pfn; +-extern unsigned long phys_base; ++extern const unsigned long phys_base; + +-static inline unsigned long __phys_addr_nodebug(unsigned long x) ++static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x) + { + unsigned long y = x - __START_KERNEL_map; + +diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h +index 7361e47..16dc226 100644 +--- a/arch/x86/include/asm/paravirt.h ++++ b/arch/x86/include/asm/paravirt.h +@@ -564,7 +564,7 @@ static inline pmd_t __pmd(pmdval_t val) + return (pmd_t) { ret }; + } + +-static inline pmdval_t pmd_val(pmd_t pmd) ++static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd) + { + pmdval_t ret; + +@@ -630,6 +630,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) + val); + } + ++static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd) ++{ ++ pgdval_t val = native_pgd_val(pgd); ++ ++ if (sizeof(pgdval_t) > sizeof(long)) ++ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp, ++ val, (u64)val >> 32); ++ else ++ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp, ++ val); ++} ++ + static inline void pgd_clear(pgd_t *pgdp) + { + set_pgd(pgdp, __pgd(0)); +@@ -714,6 +726,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, + pv_mmu_ops.set_fixmap(idx, phys, flags); + } + ++#ifdef CONFIG_PAX_KERNEXEC ++static inline unsigned long pax_open_kernel(void) ++{ ++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel); ++} ++ ++static inline unsigned long pax_close_kernel(void) ++{ ++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel); ++} ++#else ++static inline unsigned long pax_open_kernel(void) { return 0; } ++static inline unsigned long pax_close_kernel(void) { return 0; } ++#endif ++ + #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) + + static inline int arch_spin_is_locked(struct arch_spinlock *lock) +@@ -930,7 +957,7 @@ extern void default_banner(void); + + #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) + #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) +-#define PARA_INDIRECT(addr) *%cs:addr ++#define PARA_INDIRECT(addr) *%ss:addr + #endif + + #define INTERRUPT_RETURN \ +@@ -1005,6 +1032,21 @@ extern void default_banner(void); + 
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ + CLBR_NONE, \ + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) ++ ++#define GET_CR0_INTO_RDI \ ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ ++ mov %rax,%rdi ++ ++#define SET_RDI_INTO_CR0 \ ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) ++ ++#define GET_CR3_INTO_RDI \ ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \ ++ mov %rax,%rdi ++ ++#define SET_RDI_INTO_CR3 \ ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3) ++ + #endif /* CONFIG_X86_32 */ + + #endif /* __ASSEMBLY__ */ +diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h +index b3b0ec1..b1cd3eb 100644 +--- a/arch/x86/include/asm/paravirt_types.h ++++ b/arch/x86/include/asm/paravirt_types.h +@@ -84,7 +84,7 @@ struct pv_init_ops { + */ + unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, + unsigned long addr, unsigned len); +-}; ++} __no_const; + + + struct pv_lazy_ops { +@@ -98,7 +98,7 @@ struct pv_time_ops { + unsigned long long (*sched_clock)(void); + unsigned long long (*steal_clock)(int cpu); + unsigned long (*get_tsc_khz)(void); +-}; ++} __no_const; + + struct pv_cpu_ops { + /* hooks for various privileged instructions */ +@@ -192,7 +192,7 @@ struct pv_cpu_ops { + + void (*start_context_switch)(struct task_struct *prev); + void (*end_context_switch)(struct task_struct *next); +-}; ++} __no_const; + + struct pv_irq_ops { + /* +@@ -223,7 +223,7 @@ struct pv_apic_ops { + unsigned long start_eip, + unsigned long start_esp); + #endif +-}; ++} __no_const; + + struct pv_mmu_ops { + unsigned long (*read_cr2)(void); +@@ -313,6 +313,7 @@ struct pv_mmu_ops { + struct paravirt_callee_save make_pud; + + void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); ++ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval); + #endif /* PAGETABLE_LEVELS == 4 */ + #endif /* PAGETABLE_LEVELS >= 3 */ + +@@ -324,6 +325,12 @@ struct pv_mmu_ops { + an mfn. We can tell which is which from the index. 
*/ + void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, + phys_addr_t phys, pgprot_t flags); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ unsigned long (*pax_open_kernel)(void); ++ unsigned long (*pax_close_kernel)(void); ++#endif ++ + }; + + struct arch_spinlock; +@@ -334,7 +341,7 @@ struct pv_lock_ops { + void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags); + int (*spin_trylock)(struct arch_spinlock *lock); + void (*spin_unlock)(struct arch_spinlock *lock); +-}; ++} __no_const; + + /* This contains all the paravirt structures: we get a convenient + * number for each function using the offset which we use to indicate +diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h +index b4389a4..7024269 100644 +--- a/arch/x86/include/asm/pgalloc.h ++++ b/arch/x86/include/asm/pgalloc.h +@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, + pmd_t *pmd, pte_t *pte) + { + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); ++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); ++} ++ ++static inline void pmd_populate_user(struct mm_struct *mm, ++ pmd_t *pmd, pte_t *pte) ++{ ++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); + set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); + } + +@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, + + #ifdef CONFIG_X86_PAE + extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); ++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) ++{ ++ pud_populate(mm, pudp, pmd); ++} + #else /* !CONFIG_X86_PAE */ + static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) + { + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); + set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd))); + } ++ ++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) ++{ ++ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); ++ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd))); ++} + #endif /* CONFIG_X86_PAE */ + + #if PAGETABLE_LEVELS > 3 +@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) + set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud))); + } + ++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) ++{ ++ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); ++ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud))); ++} ++ + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) + { + return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); +diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h +index f2b489c..4f7e2e5 100644 +--- a/arch/x86/include/asm/pgtable-2level.h ++++ b/arch/x86/include/asm/pgtable-2level.h +@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte) + + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) + { ++ pax_open_kernel(); + *pmdp = pmd; ++ pax_close_kernel(); + } + + static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) +diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h +index 4cc9f2b..5fd9226 100644 +--- a/arch/x86/include/asm/pgtable-3level.h ++++ b/arch/x86/include/asm/pgtable-3level.h +@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) + + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) + { ++ pax_open_kernel(); + set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); ++ pax_close_kernel(); + } + + static inline void 
native_set_pud(pud_t *pudp, pud_t pud) + { ++ pax_open_kernel(); + set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); ++ pax_close_kernel(); + } + + /* +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h +index 1e67223..dd6e7ea 100644 +--- a/arch/x86/include/asm/pgtable.h ++++ b/arch/x86/include/asm/pgtable.h +@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); + + #ifndef __PAGETABLE_PUD_FOLDED + #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) ++#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd) + #define pgd_clear(pgd) native_pgd_clear(pgd) + #endif + +@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); + + #define arch_end_context_switch(prev) do {} while(0) + ++#define pax_open_kernel() native_pax_open_kernel() ++#define pax_close_kernel() native_pax_close_kernel() + #endif /* CONFIG_PARAVIRT */ + ++#define __HAVE_ARCH_PAX_OPEN_KERNEL ++#define __HAVE_ARCH_PAX_CLOSE_KERNEL ++ ++#ifdef CONFIG_PAX_KERNEXEC ++static inline unsigned long native_pax_open_kernel(void) ++{ ++ unsigned long cr0; ++ ++ preempt_disable(); ++ barrier(); ++ cr0 = read_cr0() ^ X86_CR0_WP; ++ BUG_ON(cr0 & X86_CR0_WP); ++ write_cr0(cr0); ++ return cr0 ^ X86_CR0_WP; ++} ++ ++static inline unsigned long native_pax_close_kernel(void) ++{ ++ unsigned long cr0; ++ ++ cr0 = read_cr0() ^ X86_CR0_WP; ++ BUG_ON(!(cr0 & X86_CR0_WP)); ++ write_cr0(cr0); ++ barrier(); ++ preempt_enable_no_resched(); ++ return cr0 ^ X86_CR0_WP; ++} ++#else ++static inline unsigned long native_pax_open_kernel(void) { return 0; } ++static inline unsigned long native_pax_close_kernel(void) { return 0; } ++#endif ++ + /* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
+ */ ++static inline int pte_user(pte_t pte) ++{ ++ return pte_val(pte) & _PAGE_USER; ++} ++ + static inline int pte_dirty(pte_t pte) + { + return pte_flags(pte) & _PAGE_DIRTY; +@@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud) + return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; + } + ++static inline unsigned long pgd_pfn(pgd_t pgd) ++{ ++ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT; ++} ++ + #define pte_page(pte) pfn_to_page(pte_pfn(pte)) + + static inline int pmd_large(pmd_t pte) +@@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte) + return pte_clear_flags(pte, _PAGE_RW); + } + ++static inline pte_t pte_mkread(pte_t pte) ++{ ++ return __pte(pte_val(pte) | _PAGE_USER); ++} ++ + static inline pte_t pte_mkexec(pte_t pte) + { +- return pte_clear_flags(pte, _PAGE_NX); ++#ifdef CONFIG_X86_PAE ++ if (__supported_pte_mask & _PAGE_NX) ++ return pte_clear_flags(pte, _PAGE_NX); ++ else ++#endif ++ return pte_set_flags(pte, _PAGE_USER); ++} ++ ++static inline pte_t pte_exprotect(pte_t pte) ++{ ++#ifdef CONFIG_X86_PAE ++ if (__supported_pte_mask & _PAGE_NX) ++ return pte_set_flags(pte, _PAGE_NX); ++ else ++#endif ++ return pte_clear_flags(pte, _PAGE_USER); + } + + static inline pte_t pte_mkdirty(pte_t pte) +@@ -394,6 +459,15 @@ pte_t *populate_extra_pte(unsigned long vaddr); + #endif + + #ifndef __ASSEMBLY__ ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD]; ++static inline pgd_t *get_cpu_pgd(unsigned int cpu) ++{ ++ return cpu_pgd[cpu]; ++} ++#endif ++ + #include <linux/mm_types.h> + #include <linux/log2.h> + +@@ -529,7 +603,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud) + * Currently stuck as a macro due to indirect forward reference to + * linux/mmzone.h's __section_mem_map_addr() definition: + */ +-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT) ++#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT) + + /* Find an entry in the second-level page table.. */ + static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) +@@ -569,7 +643,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) + * Currently stuck as a macro due to indirect forward reference to + * linux/mmzone.h's __section_mem_map_addr() definition: + */ +-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) ++#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT) + + /* to find an entry in a page-table-directory. 
*/ + static inline unsigned long pud_index(unsigned long address) +@@ -584,7 +658,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) + + static inline int pgd_bad(pgd_t pgd) + { +- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; ++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE; + } + + static inline int pgd_none(pgd_t pgd) +@@ -607,7 +681,12 @@ static inline int pgd_none(pgd_t pgd) + * pgd_offset() returns a (pgd_t *) + * pgd_index() is used get the offset into the pgd page's array of pgd_t's; + */ +-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) ++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address)) ++#endif ++ + /* + * a shortcut which implies the use of the kernel's pgd, instead + * of a process's +@@ -618,6 +697,22 @@ static inline int pgd_none(pgd_t pgd) + #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET) + #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY) + ++#ifdef CONFIG_X86_32 ++#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY ++#else ++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT ++#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT)) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++#ifdef __ASSEMBLY__ ++#define pax_user_shadow_base pax_user_shadow_base(%rip) ++#else ++extern unsigned long pax_user_shadow_base; ++#endif ++#endif ++ ++#endif ++ + #ifndef __ASSEMBLY__ + + extern int direct_gbpages; +@@ -784,11 +879,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, + * dst and src can be on the same page, but the range must not overlap, + * and must not cross a page boundary. + */ +-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) ++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count) + { +- memcpy(dst, src, count * sizeof(pgd_t)); ++ pax_open_kernel(); ++ while (count--) ++ *dst++ = *src++; ++ pax_close_kernel(); + } + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src); ++#endif ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src); ++#else ++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {} ++#endif ++ + #define PTE_SHIFT ilog2(PTRS_PER_PTE) + static inline int page_level_shift(enum pg_level level) + { +diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h +index 9ee3221..b979c6b 100644 +--- a/arch/x86/include/asm/pgtable_32.h ++++ b/arch/x86/include/asm/pgtable_32.h +@@ -25,9 +25,6 @@ + struct mm_struct; + struct vm_area_struct; + +-extern pgd_t swapper_pg_dir[1024]; +-extern pgd_t initial_page_table[1024]; +- + static inline void pgtable_cache_init(void) { } + static inline void check_pgt_cache(void) { } + void paging_init(void); +@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); + # include <asm/pgtable-2level.h> + #endif + ++extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; ++extern pgd_t initial_page_table[PTRS_PER_PGD]; ++#ifdef CONFIG_X86_PAE ++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD]; ++#endif ++ + #if defined(CONFIG_HIGHPTE) + #define pte_offset_map(dir, address) \ + ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \ +@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); + /* Clear a kernel PTE and flush it from the TLB */ + #define kpte_clear_flush(ptep, vaddr) \ 
+ do { \ ++ pax_open_kernel(); \ + pte_clear(&init_mm, (vaddr), (ptep)); \ ++ pax_close_kernel(); \ + __flush_tlb_one((vaddr)); \ + } while (0) + + #endif /* !__ASSEMBLY__ */ + ++#define HAVE_ARCH_UNMAPPED_AREA ++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN ++ + /* + * kern_addr_valid() is (1) for FLATMEM and (0) for + * SPARSEMEM and DISCONTIGMEM +diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h +index ed5903b..c7fe163 100644 +--- a/arch/x86/include/asm/pgtable_32_types.h ++++ b/arch/x86/include/asm/pgtable_32_types.h +@@ -8,7 +8,7 @@ + */ + #ifdef CONFIG_X86_PAE + # include <asm/pgtable-3level_types.h> +-# define PMD_SIZE (1UL << PMD_SHIFT) ++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) + # define PMD_MASK (~(PMD_SIZE - 1)) + #else + # include <asm/pgtable-2level_types.h> +@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ + # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) + #endif + ++#ifdef CONFIG_PAX_KERNEXEC ++#ifndef __ASSEMBLY__ ++extern unsigned char MODULES_EXEC_VADDR[]; ++extern unsigned char MODULES_EXEC_END[]; ++#endif ++#include <asm/boot.h> ++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET) ++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET) ++#else ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++#endif ++ + #define MODULES_VADDR VMALLOC_START + #define MODULES_END VMALLOC_END + #define MODULES_LEN (MODULES_VADDR - MODULES_END) +diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h +index e22c1db..23a625a 100644 +--- a/arch/x86/include/asm/pgtable_64.h ++++ b/arch/x86/include/asm/pgtable_64.h +@@ -16,10 +16,14 @@ + + extern pud_t level3_kernel_pgt[512]; + extern pud_t level3_ident_pgt[512]; ++extern pud_t level3_vmalloc_start_pgt[512]; ++extern pud_t level3_vmalloc_end_pgt[512]; ++extern pud_t level3_vmemmap_pgt[512]; ++extern pud_t level2_vmemmap_pgt[512]; + extern pmd_t level2_kernel_pgt[512]; + extern pmd_t level2_fixmap_pgt[512]; +-extern pmd_t level2_ident_pgt[512]; +-extern pgd_t init_level4_pgt[]; ++extern pmd_t level2_ident_pgt[512*2]; ++extern pgd_t init_level4_pgt[512]; + + #define swapper_pg_dir init_level4_pgt + +@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) + + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) + { ++ pax_open_kernel(); + *pmdp = pmd; ++ pax_close_kernel(); + } + + static inline void native_pmd_clear(pmd_t *pmd) +@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) + + static inline void native_set_pud(pud_t *pudp, pud_t pud) + { ++ pax_open_kernel(); + *pudp = pud; ++ pax_close_kernel(); + } + + static inline void native_pud_clear(pud_t *pud) +@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud) + + static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) + { ++ pax_open_kernel(); ++ *pgdp = pgd; ++ pax_close_kernel(); ++} ++ ++static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd) ++{ + *pgdp = pgd; + } + +diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h +index 2d88344..4679fc3 100644 +--- a/arch/x86/include/asm/pgtable_64_types.h ++++ b/arch/x86/include/asm/pgtable_64_types.h +@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t; + #define MODULES_VADDR _AC(0xffffffffa0000000, UL) + #define MODULES_END _AC(0xffffffffff000000, UL) + #define MODULES_LEN (MODULES_END - MODULES_VADDR) ++#define MODULES_EXEC_VADDR MODULES_VADDR ++#define 
MODULES_EXEC_END MODULES_END ++ ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) + + #define EARLY_DYNAMIC_PAGE_TABLES 64 + +diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h +index 567b5d0..bd91d64 100644 +--- a/arch/x86/include/asm/pgtable_types.h ++++ b/arch/x86/include/asm/pgtable_types.h +@@ -16,13 +16,12 @@ + #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ + #define _PAGE_BIT_PAT 7 /* on 4KB pages */ + #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ +-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ ++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */ + #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */ + #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */ + #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ +-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 +-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 +-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */ ++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL ++#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */ + #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ + + /* If _PAGE_BIT_PRESENT is clear, we use these: */ +@@ -40,7 +39,6 @@ + #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) + #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) + #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) +-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) + #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP) + #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) + #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) +@@ -57,8 +55,10 @@ + + #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) +-#else ++#elif defined(CONFIG_KMEMCHECK) + #define _PAGE_NX (_AT(pteval_t, 0)) ++#else ++#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) + #endif + + #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) +@@ -116,6 +116,9 @@ + #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_ACCESSED) + ++#define PAGE_READONLY_NOEXEC PAGE_READONLY ++#define PAGE_SHARED_NOEXEC PAGE_SHARED ++ + #define __PAGE_KERNEL_EXEC \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL) + #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) +@@ -126,7 +129,7 @@ + #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) + #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) + #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) +-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) ++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER) + #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER) + #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT) + #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) +@@ -188,8 +191,8 @@ + * bits are combined, this will alow user to access the high address mapped + * VDSO in the presence of CONFIG_COMPAT_VDSO + */ +-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ +-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */ ++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ ++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ + #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ + #endif + +@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd) + { + return native_pgd_val(pgd) & 
PTE_FLAGS_MASK; + } ++#endif + ++#if PAGETABLE_LEVELS == 3 ++#include <asm-generic/pgtable-nopud.h> ++#endif ++ ++#if PAGETABLE_LEVELS == 2 ++#include <asm-generic/pgtable-nopmd.h> ++#endif ++ ++#ifndef __ASSEMBLY__ + #if PAGETABLE_LEVELS > 3 + typedef struct { pudval_t pud; } pud_t; + +@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud) + return pud.pud; + } + #else +-#include <asm-generic/pgtable-nopud.h> +- + static inline pudval_t native_pud_val(pud_t pud) + { + return native_pgd_val(pud.pgd); +@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) + return pmd.pmd; + } + #else +-#include <asm-generic/pgtable-nopmd.h> +- + static inline pmdval_t native_pmd_val(pmd_t pmd) + { + return native_pgd_val(pmd.pud.pgd); +@@ -303,7 +312,6 @@ typedef struct page *pgtable_t; + + extern pteval_t __supported_pte_mask; + extern void set_nx(void); +-extern int nx_enabled; + + #define pgprot_writecombine pgprot_writecombine + extern pgprot_t pgprot_writecombine(pgprot_t prot); +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index 3270116..8d99d82 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -285,7 +285,7 @@ struct tss_struct { + + } ____cacheline_aligned; + +-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss); ++extern struct tss_struct init_tss[NR_CPUS]; + + /* + * Save the original ist values for checking stack pointers during debugging +@@ -826,11 +826,18 @@ static inline void spin_lock_prefetch(const void *x) + */ + #define TASK_SIZE PAGE_OFFSET + #define TASK_SIZE_MAX TASK_SIZE ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2) ++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE) ++#else + #define STACK_TOP TASK_SIZE +-#define STACK_TOP_MAX STACK_TOP ++#endif ++ ++#define STACK_TOP_MAX TASK_SIZE + + #define INIT_THREAD { \ +- .sp0 = sizeof(init_stack) + (long)&init_stack, \ ++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \ + .vm86_info = NULL, \ + .sysenter_cs = __KERNEL_CS, \ + .io_bitmap_ptr = NULL, \ +@@ -844,7 +851,7 @@ static inline void spin_lock_prefetch(const void *x) + */ + #define INIT_TSS { \ + .x86_tss = { \ +- .sp0 = sizeof(init_stack) + (long)&init_stack, \ ++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \ + .ss0 = __KERNEL_DS, \ + .ss1 = __KERNEL_CS, \ + .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ +@@ -855,11 +862,7 @@ static inline void spin_lock_prefetch(const void *x) + extern unsigned long thread_saved_pc(struct task_struct *tsk); + + #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) +-#define KSTK_TOP(info) \ +-({ \ +- unsigned long *__ptr = (unsigned long *)(info); \ +- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ +-}) ++#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0) + + /* + * The below -8 is to reserve 8 bytes on top of the ring0 stack. +@@ -874,7 +877,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); + #define task_pt_regs(task) \ + ({ \ + struct pt_regs *__regs__; \ +- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ ++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \ + __regs__ - 1; \ + }) + +@@ -884,13 +887,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); + /* + * User space process size. 47bits minus one guard page. 
+ */ +-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE) ++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE) + + /* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ + #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ +- 0xc0000000 : 0xFFFFe000) ++ 0xc0000000 : 0xFFFFf000) + + #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \ + IA32_PAGE_OFFSET : TASK_SIZE_MAX) +@@ -901,11 +904,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); + #define STACK_TOP_MAX TASK_SIZE_MAX + + #define INIT_THREAD { \ +- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ ++ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \ + } + + #define INIT_TSS { \ +- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ ++ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \ + } + + /* +@@ -933,6 +936,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip, + */ + #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) + ++#ifdef CONFIG_PAX_SEGMEXEC ++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3)) ++#endif ++ + #define KSTK_EIP(task) (task_pt_regs(task)->ip) + + /* Get/set a process' ability to use the timestamp counter instruction */ +@@ -993,7 +1000,7 @@ extern bool cpu_has_amd_erratum(const int *); + #define cpu_has_amd_erratum(x) (false) + #endif /* CONFIG_CPU_SUP_AMD */ + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + extern void free_init_pages(char *what, unsigned long begin, unsigned long end); + + void default_idle(void); +@@ -1003,6 +1010,6 @@ bool xen_set_default_idle(void); + #define xen_set_default_idle 0 + #endif + +-void stop_this_cpu(void *dummy); ++void stop_this_cpu(void *dummy) __noreturn; + + #endif /* _ASM_X86_PROCESSOR_H */ +diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h +index 942a086..6c26446 100644 +--- a/arch/x86/include/asm/ptrace.h ++++ b/arch/x86/include/asm/ptrace.h +@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) + } + + /* +- * user_mode_vm(regs) determines whether a register set came from user mode. ++ * user_mode(regs) determines whether a register set came from user mode. + * This is true if V8086 mode was enabled OR if the register set was from + * protected mode with RPL-3 CS value. This tricky test checks that with + * one comparison. Many places in the kernel can bypass this full check +- * if they have already ruled out V8086 mode, so user_mode(regs) can be used. ++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can ++ * be used. 
+ */ +-static inline int user_mode(struct pt_regs *regs) ++static inline int user_mode_novm(struct pt_regs *regs) + { + #ifdef CONFIG_X86_32 + return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; + #else +- return !!(regs->cs & 3); ++ return !!(regs->cs & SEGMENT_RPL_MASK); + #endif + } + +-static inline int user_mode_vm(struct pt_regs *regs) ++static inline int user_mode(struct pt_regs *regs) + { + #ifdef CONFIG_X86_32 + return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= + USER_RPL; + #else +- return user_mode(regs); ++ return user_mode_novm(regs); + #endif + } + +@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs) + #ifdef CONFIG_X86_64 + static inline bool user_64bit_mode(struct pt_regs *regs) + { ++ unsigned long cs = regs->cs & 0xffff; + #ifndef CONFIG_PARAVIRT + /* + * On non-paravirt systems, this is the only long mode CPL 3 + * selector. We do not allow long mode selectors in the LDT. + */ +- return regs->cs == __USER_CS; ++ return cs == __USER_CS; + #else + /* Headers are too twisted for this to go in paravirt.h. */ +- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; ++ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs; + #endif + } + +@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs, + * Traps from the kernel do not save sp and ss. + * Use the helper function to retrieve sp. + */ +- if (offset == offsetof(struct pt_regs, sp) && +- regs->cs == __KERNEL_CS) +- return kernel_stack_pointer(regs); ++ if (offset == offsetof(struct pt_regs, sp)) { ++ unsigned long cs = regs->cs & 0xffff; ++ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) ++ return kernel_stack_pointer(regs); ++ } + #endif + return *(unsigned long *)((unsigned long)regs + offset); + } +diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h +index 9c6b890..5305f53 100644 +--- a/arch/x86/include/asm/realmode.h ++++ b/arch/x86/include/asm/realmode.h +@@ -22,16 +22,14 @@ struct real_mode_header { + #endif + /* APM/BIOS reboot */ + u32 machine_real_restart_asm; +-#ifdef CONFIG_X86_64 + u32 machine_real_restart_seg; +-#endif + }; + + /* This must match data at trampoline_32/64.S */ + struct trampoline_header { + #ifdef CONFIG_X86_32 + u32 start; +- u16 gdt_pad; ++ u16 boot_cs; + u16 gdt_limit; + u32 gdt_base; + #else +diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h +index a82c4f1..ac45053 100644 +--- a/arch/x86/include/asm/reboot.h ++++ b/arch/x86/include/asm/reboot.h +@@ -6,13 +6,13 @@ + struct pt_regs; + + struct machine_ops { +- void (*restart)(char *cmd); +- void (*halt)(void); +- void (*power_off)(void); ++ void (* __noreturn restart)(char *cmd); ++ void (* __noreturn halt)(void); ++ void (* __noreturn power_off)(void); + void (*shutdown)(void); + void (*crash_shutdown)(struct pt_regs *); +- void (*emergency_restart)(void); +-}; ++ void (* __noreturn emergency_restart)(void); ++} __no_const; + + extern struct machine_ops machine_ops; + +diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h +index 2dbe4a7..ce1db00 100644 +--- a/arch/x86/include/asm/rwsem.h ++++ b/arch/x86/include/asm/rwsem.h +@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem) + { + asm volatile("# beginning down_read\n\t" + LOCK_PREFIX _ASM_INC "(%1)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX _ASM_DEC "(%1)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* adds 0x00000001 */ + " jns 1f\n" + " call 
call_rwsem_down_read_failed\n" +@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) + "1:\n\t" + " mov %1,%2\n\t" + " add %3,%2\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "sub %3,%2\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + " jle 2f\n\t" + LOCK_PREFIX " cmpxchg %2,%0\n\t" + " jnz 1b\n\t" +@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) + long tmp; + asm volatile("# beginning down_write\n\t" + LOCK_PREFIX " xadd %1,(%2)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "mov %1,(%2)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* adds 0xffff0001, returns the old value */ + " test %1,%1\n\t" + /* was the count 0 before? */ +@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem) + long tmp; + asm volatile("# beginning __up_read\n\t" + LOCK_PREFIX " xadd %1,(%2)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "mov %1,(%2)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* subtracts 1, returns the old value */ + " jns 1f\n\t" + " call call_rwsem_wake\n" /* expects old value in %edx */ +@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem) + long tmp; + asm volatile("# beginning __up_write\n\t" + LOCK_PREFIX " xadd %1,(%2)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "mov %1,(%2)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* subtracts 0xffff0001, returns the old value */ + " jns 1f\n\t" + " call call_rwsem_wake\n" /* expects old value in %edx */ +@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem) + { + asm volatile("# beginning __downgrade_write\n\t" + LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX _ASM_SUB "%2,(%1)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* + * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) + * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) +@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem) + */ + static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) + { +- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" ++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX _ASM_SUB "%1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + : "+m" (sem->count) + : "er" (delta)); + } +@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) + */ + static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) + { +- return delta + xadd(&sem->count, delta); ++ return delta + xadd_check_overflow(&sem->count, delta); + } + + #endif /* __KERNEL__ */ +diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h +index c48a950..c6d7468 100644 +--- a/arch/x86/include/asm/segment.h ++++ b/arch/x86/include/asm/segment.h +@@ -64,10 +64,15 @@ + * 26 - ESPFIX small SS + * 27 - per-cpu [ offset to per-cpu data area ] + * 28 - stack_canary-20 [ for stack protector ] +- * 29 - unused +- * 30 - unused ++ * 29 - PCI BIOS CS ++ * 30 - PCI BIOS DS + * 31 - TSS for double fault handler + */ ++#define GDT_ENTRY_KERNEXEC_EFI_CS (1) ++#define GDT_ENTRY_KERNEXEC_EFI_DS (2) ++#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8) ++#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8) ++ + #define GDT_ENTRY_TLS_MIN 6 + #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) + +@@ -79,6 +84,8 @@ + + #define 
GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0) + ++#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4) ++ + #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1) + + #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4) +@@ -104,6 +111,12 @@ + #define __KERNEL_STACK_CANARY 0 + #endif + ++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17) ++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8) ++ ++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18) ++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8) ++ + #define GDT_ENTRY_DOUBLEFAULT_TSS 31 + + /* +@@ -141,7 +154,7 @@ + */ + + /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ +-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) ++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16) + + + #else +@@ -165,6 +178,8 @@ + #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3) + #define __USER32_DS __USER_DS + ++#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7 ++ + #define GDT_ENTRY_TSS 8 /* needs two entries */ + #define GDT_ENTRY_LDT 10 /* needs two entries */ + #define GDT_ENTRY_TLS_MIN 12 +@@ -185,6 +200,7 @@ + #endif + + #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8) ++#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8) + #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8) + #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3) + #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3) +@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment) + { + unsigned long __limit; + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); +- return __limit + 1; ++ return __limit; + } + + #endif /* !__ASSEMBLY__ */ +diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h +index b073aae..39f9bdd 100644 +--- a/arch/x86/include/asm/smp.h ++++ b/arch/x86/include/asm/smp.h +@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); + /* cpus sharing the last level cache: */ + DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); + DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id); +-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); ++DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number); + + static inline struct cpumask *cpu_sibling_mask(int cpu) + { +@@ -79,7 +79,7 @@ struct smp_ops { + + void (*send_call_func_ipi)(const struct cpumask *mask); + void (*send_call_func_single_ipi)(int cpu); +-}; ++} __no_const; + + /* Globals due to paravirt */ + extern void set_cpu_sibling_map(int cpu); +@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata; + extern int safe_smp_processor_id(void); + + #elif defined(CONFIG_X86_64_SMP) +-#define raw_smp_processor_id() (this_cpu_read(cpu_number)) +- +-#define stack_smp_processor_id() \ +-({ \ +- struct thread_info *ti; \ +- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ +- ti->cpu; \ +-}) ++#define raw_smp_processor_id() (this_cpu_read(cpu_number)) ++#define stack_smp_processor_id() raw_smp_processor_id() + #define safe_smp_processor_id() smp_processor_id() + + #endif +diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h +index 33692ea..350a534 100644 +--- a/arch/x86/include/asm/spinlock.h ++++ b/arch/x86/include/asm/spinlock.h +@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock) + static inline void arch_read_lock(arch_rwlock_t *rw) + { + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + "jns 1f\n" + "call 
__read_lock_failed\n\t" + "1:\n" +@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) + static inline void arch_write_lock(arch_rwlock_t *rw) + { + asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + "jz 1f\n" + "call __write_lock_failed\n\t" + "1:\n" +@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock) + + static inline void arch_read_unlock(arch_rwlock_t *rw) + { +- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0" ++ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + :"+m" (rw->lock) : : "memory"); + } + + static inline void arch_write_unlock(arch_rwlock_t *rw) + { +- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0" ++ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory"); + } + +diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h +index 6a99859..03cb807 100644 +--- a/arch/x86/include/asm/stackprotector.h ++++ b/arch/x86/include/asm/stackprotector.h +@@ -47,7 +47,7 @@ + * head_32 for boot CPU and setup_per_cpu_areas() for others. + */ + #define GDT_STACK_CANARY_INIT \ +- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18), ++ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17), + + /* + * Initialize the stackprotector canary value. +@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu) + + static inline void load_stack_canary_segment(void) + { +-#ifdef CONFIG_X86_32 ++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF) + asm volatile ("mov %0, %%gs" : : "r" (0)); + #endif + } +diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h +index 70bbe39..4ae2bd4 100644 +--- a/arch/x86/include/asm/stacktrace.h ++++ b/arch/x86/include/asm/stacktrace.h +@@ -11,28 +11,20 @@ + + extern int kstack_depth_to_print; + +-struct thread_info; ++struct task_struct; + struct stacktrace_ops; + +-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo, +- unsigned long *stack, +- unsigned long bp, +- const struct stacktrace_ops *ops, +- void *data, +- unsigned long *end, +- int *graph); +- +-extern unsigned long +-print_context_stack(struct thread_info *tinfo, +- unsigned long *stack, unsigned long bp, +- const struct stacktrace_ops *ops, void *data, +- unsigned long *end, int *graph); +- +-extern unsigned long +-print_context_stack_bp(struct thread_info *tinfo, +- unsigned long *stack, unsigned long bp, +- const struct stacktrace_ops *ops, void *data, +- unsigned long *end, int *graph); ++typedef unsigned long walk_stack_t(struct task_struct *task, ++ void *stack_start, ++ unsigned long *stack, ++ unsigned long bp, ++ const struct stacktrace_ops *ops, ++ void *data, ++ unsigned long *end, ++ int *graph); ++ ++extern walk_stack_t print_context_stack; ++extern walk_stack_t print_context_stack_bp; + + /* Generic stack tracer with callbacks */ + +@@ -40,7 +32,7 @@ struct stacktrace_ops { + void (*address)(void *data, unsigned long address, int reliable); + /* On negative return stop dumping */ + int (*stack)(void *data, char *name); +- walk_stack_t walk_stack; ++ 
walk_stack_t *walk_stack; + }; + + void dump_trace(struct task_struct *tsk, struct pt_regs *regs, +diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h +index 4ec45b3..a4f0a8a 100644 +--- a/arch/x86/include/asm/switch_to.h ++++ b/arch/x86/include/asm/switch_to.h +@@ -108,7 +108,7 @@ do { \ + "call __switch_to\n\t" \ + "movq "__percpu_arg([current_task])",%%rsi\n\t" \ + __switch_canary \ +- "movq %P[thread_info](%%rsi),%%r8\n\t" \ ++ "movq "__percpu_arg([thread_info])",%%r8\n\t" \ + "movq %%rax,%%rdi\n\t" \ + "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ + "jnz ret_from_fork\n\t" \ +@@ -119,7 +119,7 @@ do { \ + [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ + [ti_flags] "i" (offsetof(struct thread_info, flags)), \ + [_tif_fork] "i" (_TIF_FORK), \ +- [thread_info] "i" (offsetof(struct task_struct, stack)), \ ++ [thread_info] "m" (current_tinfo), \ + [current_task] "m" (current_task) \ + __switch_canary_iparam \ + : "memory", "cc" __EXTRA_CLOBBER) +diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h +index 2cd056e..0224df8 100644 +--- a/arch/x86/include/asm/thread_info.h ++++ b/arch/x86/include/asm/thread_info.h +@@ -10,6 +10,7 @@ + #include <linux/compiler.h> + #include <asm/page.h> + #include <asm/types.h> ++#include <asm/percpu.h> + + /* + * low level task data that entry.S needs immediate access to +@@ -23,7 +24,6 @@ struct exec_domain; + #include <linux/atomic.h> + + struct thread_info { +- struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + __u32 flags; /* low level flags */ + __u32 status; /* thread synchronous flags */ +@@ -33,19 +33,13 @@ struct thread_info { + mm_segment_t addr_limit; + struct restart_block restart_block; + void __user *sysenter_return; +-#ifdef CONFIG_X86_32 +- unsigned long previous_esp; /* ESP of the previous stack in +- case of nested (IRQ) stacks +- */ +- __u8 supervisor_stack[0]; +-#endif ++ unsigned long lowest_stack; + unsigned int sig_on_uaccess_error:1; + unsigned int uaccess_err:1; /* uaccess failed */ + }; + +-#define INIT_THREAD_INFO(tsk) \ ++#define INIT_THREAD_INFO \ + { \ +- .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = 0, \ + .cpu = 0, \ +@@ -56,7 +50,7 @@ struct thread_info { + }, \ + } + +-#define init_thread_info (init_thread_union.thread_info) ++#define init_thread_info (init_thread_union.stack) + #define init_stack (init_thread_union.stack) + + #else /* !__ASSEMBLY__ */ +@@ -97,6 +91,7 @@ struct thread_info { + #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ + #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */ + #define TIF_X32 30 /* 32-bit native x86-64 binary */ ++#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */ + + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +@@ -121,17 +116,18 @@ struct thread_info { + #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) + #define _TIF_ADDR32 (1 << TIF_ADDR32) + #define _TIF_X32 (1 << TIF_X32) ++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID) + + /* work to do in syscall_trace_enter() */ + #define _TIF_WORK_SYSCALL_ENTRY \ + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ + _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \ +- _TIF_NOHZ) ++ _TIF_NOHZ | _TIF_GRSEC_SETXID) + + /* work to do in syscall_trace_leave() */ + #define _TIF_WORK_SYSCALL_EXIT \ + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | 
_TIF_SINGLESTEP | \ +- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ) ++ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID) + + /* work to do on interrupt/exception return */ + #define _TIF_WORK_MASK \ +@@ -142,7 +138,7 @@ struct thread_info { + /* work to do on any return to user space */ + #define _TIF_ALLWORK_MASK \ + ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \ +- _TIF_NOHZ) ++ _TIF_NOHZ | _TIF_GRSEC_SETXID) + + /* Only used for 64 bit */ + #define _TIF_DO_NOTIFY_MASK \ +@@ -158,6 +154,23 @@ struct thread_info { + + #define PREEMPT_ACTIVE 0x10000000 + ++#ifdef __ASSEMBLY__ ++/* how to get the thread information struct from ASM */ ++#define GET_THREAD_INFO(reg) \ ++ mov PER_CPU_VAR(current_tinfo), reg ++ ++/* use this one if reg already contains %esp */ ++#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg) ++#else ++/* how to get the thread information struct from C */ ++DECLARE_PER_CPU(struct thread_info *, current_tinfo); ++ ++static __always_inline struct thread_info *current_thread_info(void) ++{ ++ return this_cpu_read_stable(current_tinfo); ++} ++#endif ++ + #ifdef CONFIG_X86_32 + + #define STACK_WARN (THREAD_SIZE/8) +@@ -168,35 +181,13 @@ struct thread_info { + */ + #ifndef __ASSEMBLY__ + +- + /* how to get the current stack pointer from C */ + register unsigned long current_stack_pointer asm("esp") __used; + +-/* how to get the thread information struct from C */ +-static inline struct thread_info *current_thread_info(void) +-{ +- return (struct thread_info *) +- (current_stack_pointer & ~(THREAD_SIZE - 1)); +-} +- +-#else /* !__ASSEMBLY__ */ +- +-/* how to get the thread information struct from ASM */ +-#define GET_THREAD_INFO(reg) \ +- movl $-THREAD_SIZE, reg; \ +- andl %esp, reg +- +-/* use this one if reg already contains %esp */ +-#define GET_THREAD_INFO_WITH_ESP(reg) \ +- andl $-THREAD_SIZE, reg +- + #endif + + #else /* X86_32 */ + +-#include <asm/percpu.h> +-#define KERNEL_STACK_OFFSET (5*8) +- + /* + * macros/functions for gaining access to the thread information structure + * preempt_count needs to be 1 initially, until the scheduler is functional. +@@ -204,27 +195,8 @@ static inline struct thread_info *current_thread_info(void) + #ifndef __ASSEMBLY__ + DECLARE_PER_CPU(unsigned long, kernel_stack); + +-static inline struct thread_info *current_thread_info(void) +-{ +- struct thread_info *ti; +- ti = (void *)(this_cpu_read_stable(kernel_stack) + +- KERNEL_STACK_OFFSET - THREAD_SIZE); +- return ti; +-} +- +-#else /* !__ASSEMBLY__ */ +- +-/* how to get the thread information struct from ASM */ +-#define GET_THREAD_INFO(reg) \ +- movq PER_CPU_VAR(kernel_stack),reg ; \ +- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg +- +-/* +- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in +- * a certain register (to be used in assembler memory operands). 
+- */ +-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg) +- ++/* how to get the current stack pointer from C */ ++register unsigned long current_stack_pointer asm("rsp") __used; + #endif + + #endif /* !X86_32 */ +@@ -285,5 +257,12 @@ static inline bool is_ia32_task(void) + extern void arch_task_cache_init(void); + extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); + extern void arch_release_task_struct(struct task_struct *tsk); ++ ++#define __HAVE_THREAD_FUNCTIONS ++#define task_thread_info(task) (&(task)->tinfo) ++#define task_stack_page(task) ((task)->stack) ++#define setup_thread_stack(p, org) do {} while (0) ++#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1) ++ + #endif + #endif /* _ASM_X86_THREAD_INFO_H */ +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h +index 5ee2687..70d5895 100644 +--- a/arch/x86/include/asm/uaccess.h ++++ b/arch/x86/include/asm/uaccess.h +@@ -7,6 +7,7 @@ + #include <linux/compiler.h> + #include <linux/thread_info.h> + #include <linux/string.h> ++#include <linux/sched.h> + #include <asm/asm.h> + #include <asm/page.h> + #include <asm/smap.h> +@@ -29,7 +30,12 @@ + + #define get_ds() (KERNEL_DS) + #define get_fs() (current_thread_info()->addr_limit) ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++void __set_fs(mm_segment_t x); ++void set_fs(mm_segment_t x); ++#else + #define set_fs(x) (current_thread_info()->addr_limit = (x)) ++#endif + + #define segment_eq(a, b) ((a).seg == (b).seg) + +@@ -77,8 +83,33 @@ + * checks that the pointer is in the user space range - after calling + * this function, memory access functions may still return -EFAULT. + */ +-#define access_ok(type, addr, size) \ +- (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) ++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) ++#define access_ok(type, addr, size) \ ++({ \ ++ long __size = size; \ ++ unsigned long __addr = (unsigned long)addr; \ ++ unsigned long __addr_ao = __addr & PAGE_MASK; \ ++ unsigned long __end_ao = __addr + __size - 1; \ ++ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\ ++ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \ ++ while(__addr_ao <= __end_ao) { \ ++ char __c_ao; \ ++ __addr_ao += PAGE_SIZE; \ ++ if (__size > PAGE_SIZE) \ ++ cond_resched(); \ ++ if (__get_user(__c_ao, (char __user *)__addr)) \ ++ break; \ ++ if (type != VERIFY_WRITE) { \ ++ __addr = __addr_ao; \ ++ continue; \ ++ } \ ++ if (__put_user(__c_ao, (char __user *)__addr)) \ ++ break; \ ++ __addr = __addr_ao; \ ++ } \ ++ } \ ++ __ret_ao; \ ++}) + + /* + * The exception table consists of pairs of addresses relative to the +@@ -176,13 +207,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) + asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ + : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") + +- ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __copyuser_seg "gs;" ++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n" ++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n" ++#else ++#define __copyuser_seg ++#define __COPYUSER_SET_ES ++#define __COPYUSER_RESTORE_ES ++#endif + + #ifdef CONFIG_X86_32 + #define __put_user_asm_u64(x, addr, err, errret) \ + asm volatile(ASM_STAC "\n" \ +- "1: movl %%eax,0(%2)\n" \ +- "2: movl %%edx,4(%2)\n" \ ++ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \ ++ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \ + "3: " 
ASM_CLAC "\n" \ + ".section .fixup,\"ax\"\n" \ + "4: movl %3,%0\n" \ +@@ -195,8 +234,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) + + #define __put_user_asm_ex_u64(x, addr) \ + asm volatile(ASM_STAC "\n" \ +- "1: movl %%eax,0(%1)\n" \ +- "2: movl %%edx,4(%1)\n" \ ++ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \ ++ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \ + "3: " ASM_CLAC "\n" \ + _ASM_EXTABLE_EX(1b, 2b) \ + _ASM_EXTABLE_EX(2b, 3b) \ +@@ -246,7 +285,7 @@ extern void __put_user_8(void); + __typeof__(*(ptr)) __pu_val; \ + __chk_user_ptr(ptr); \ + might_fault(); \ +- __pu_val = x; \ ++ __pu_val = (x); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __put_user_x(1, __pu_val, ptr, __ret_pu); \ +@@ -345,7 +384,7 @@ do { \ + + #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ + asm volatile(ASM_STAC "\n" \ +- "1: mov"itype" %2,%"rtype"1\n" \ ++ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\ + "2: " ASM_CLAC "\n" \ + ".section .fixup,\"ax\"\n" \ + "3: mov %3,%0\n" \ +@@ -353,7 +392,7 @@ do { \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ +- : "=r" (err), ltype(x) \ ++ : "=r" (err), ltype (x) \ + : "m" (__m(addr)), "i" (errret), "0" (err)) + + #define __get_user_size_ex(x, ptr, size) \ +@@ -378,7 +417,7 @@ do { \ + } while (0) + + #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ +- asm volatile("1: mov"itype" %1,%"rtype"0\n" \ ++ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\ + "2:\n" \ + _ASM_EXTABLE_EX(1b, 2b) \ + : ltype(x) : "m" (__m(addr))) +@@ -395,13 +434,24 @@ do { \ + int __gu_err; \ + unsigned long __gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ +- (x) = (__force __typeof__(*(ptr)))__gu_val; \ ++ (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ + }) + + /* FIXME: this hack is definitely wrong -AK */ + struct __large_struct { unsigned long buf[100]; }; +-#define __m(x) (*(struct __large_struct __user *)(x)) ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define ____m(x) \ ++({ \ ++ unsigned long ____x = (unsigned long)(x); \ ++ if (____x < pax_user_shadow_base) \ ++ ____x += pax_user_shadow_base; \ ++ (typeof(x))____x; \ ++}) ++#else ++#define ____m(x) (x) ++#endif ++#define __m(x) (*(struct __large_struct __user *)____m(x)) + + /* + * Tell gcc we read from memory instead of writing: this is because +@@ -410,7 +460,7 @@ struct __large_struct { unsigned long buf[100]; }; + */ + #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ + asm volatile(ASM_STAC "\n" \ +- "1: mov"itype" %"rtype"1,%2\n" \ ++ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\ + "2: " ASM_CLAC "\n" \ + ".section .fixup,\"ax\"\n" \ + "3: mov %3,%0\n" \ +@@ -418,10 +468,10 @@ struct __large_struct { unsigned long buf[100]; }; + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r"(err) \ +- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) ++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err)) + + #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ +- asm volatile("1: mov"itype" %"rtype"0,%1\n" \ ++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\ + "2:\n" \ + _ASM_EXTABLE_EX(1b, 2b) \ + : : ltype(x), "m" (__m(addr))) +@@ -460,8 +510,12 @@ struct __large_struct { unsigned long buf[100]; }; + * On error, the variable @x is set to zero. 
+ */ + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __get_user(x, ptr) get_user((x), (ptr)) ++#else + #define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) ++#endif + + /** + * __put_user: - Write a simple value into user space, with less checking. +@@ -483,8 +537,12 @@ struct __large_struct { unsigned long buf[100]; }; + * Returns zero on success, or -EFAULT on error. + */ + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __put_user(x, ptr) put_user((x), (ptr)) ++#else + #define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) ++#endif + + #define __get_user_unaligned __get_user + #define __put_user_unaligned __put_user +@@ -502,7 +560,7 @@ struct __large_struct { unsigned long buf[100]; }; + #define get_user_ex(x, ptr) do { \ + unsigned long __gue_val; \ + __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ +- (x) = (__force __typeof__(*(ptr)))__gue_val; \ ++ (x) = (__typeof__(*(ptr)))__gue_val; \ + } while (0) + + #define put_user_try uaccess_try +@@ -519,8 +577,8 @@ strncpy_from_user(char *dst, const char __user *src, long count); + extern __must_check long strlen_user(const char __user *str); + extern __must_check long strnlen_user(const char __user *str, long n); + +-unsigned long __must_check clear_user(void __user *mem, unsigned long len); +-unsigned long __must_check __clear_user(void __user *mem, unsigned long len); ++unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2); ++unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2); + + /* + * movsl can be slow when source and dest are not both 8-byte aligned +diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h +index 7f760a9..04b1c65 100644 +--- a/arch/x86/include/asm/uaccess_32.h ++++ b/arch/x86/include/asm/uaccess_32.h +@@ -11,15 +11,15 @@ + #include <asm/page.h> + + unsigned long __must_check __copy_to_user_ll +- (void __user *to, const void *from, unsigned long n); ++ (void __user *to, const void *from, unsigned long n) __size_overflow(3); + unsigned long __must_check __copy_from_user_ll +- (void *to, const void __user *from, unsigned long n); ++ (void *to, const void __user *from, unsigned long n) __size_overflow(3); + unsigned long __must_check __copy_from_user_ll_nozero +- (void *to, const void __user *from, unsigned long n); ++ (void *to, const void __user *from, unsigned long n) __size_overflow(3); + unsigned long __must_check __copy_from_user_ll_nocache +- (void *to, const void __user *from, unsigned long n); ++ (void *to, const void __user *from, unsigned long n) __size_overflow(3); + unsigned long __must_check __copy_from_user_ll_nocache_nozero +- (void *to, const void __user *from, unsigned long n); ++ (void *to, const void __user *from, unsigned long n) __size_overflow(3); + + /** + * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. 
+@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero + static __always_inline unsigned long __must_check + __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ ++ check_object_size(from, n, true); ++ + if (__builtin_constant_p(n)) { + unsigned long ret; + +@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check + __copy_to_user(void __user *to, const void *from, unsigned long n) + { + might_fault(); ++ + return __copy_to_user_inatomic(to, from, n); + } + + static __always_inline unsigned long + __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + /* Avoid zeroing the tail if the copy fails.. + * If 'n' is constant and 1, 2, or 4, we do still zero on a failure, + * but as the zeroing behaviour is only significant when n is not +@@ -137,6 +146,12 @@ static __always_inline unsigned long + __copy_from_user(void *to, const void __user *from, unsigned long n) + { + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ ++ check_object_size(to, n, false); ++ + if (__builtin_constant_p(n)) { + unsigned long ret; + +@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, + const void __user *from, unsigned long n) + { + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ + if (__builtin_constant_p(n)) { + unsigned long ret; + +@@ -181,15 +200,19 @@ static __always_inline unsigned long + __copy_from_user_inatomic_nocache(void *to, const void __user *from, + unsigned long n) + { +- return __copy_from_user_ll_nocache_nozero(to, from, n); +-} ++ if ((long)n < 0) ++ return n; + +-unsigned long __must_check copy_to_user(void __user *to, +- const void *from, unsigned long n); +-unsigned long __must_check _copy_from_user(void *to, +- const void __user *from, +- unsigned long n); ++ return __copy_from_user_ll_nocache_nozero(to, from, n); ++} + ++extern void copy_to_user_overflow(void) ++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS ++ __compiletime_error("copy_to_user() buffer size is not provably correct") ++#else ++ __compiletime_warning("copy_to_user() buffer size is not provably correct") ++#endif ++; + + extern void copy_from_user_overflow(void) + #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS +@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void) + #endif + ; + +-static inline unsigned long __must_check copy_from_user(void *to, +- const void __user *from, +- unsigned long n) ++/** ++ * copy_to_user: - Copy a block of data into user space. ++ * @to: Destination address, in user space. ++ * @from: Source address, in kernel space. ++ * @n: Number of bytes to copy. ++ * ++ * Context: User context only. This function may sleep. ++ * ++ * Copy data from kernel space to user space. ++ * ++ * Returns number of bytes that could not be copied. ++ * On success, this will be zero. ++ */ ++static inline unsigned long __must_check ++copy_to_user(void __user *to, const void *from, unsigned long n) + { +- int sz = __compiletime_object_size(to); ++ size_t sz = __compiletime_object_size(from); + +- if (likely(sz == -1 || sz >= n)) +- n = _copy_from_user(to, from, n); +- else +- copy_from_user_overflow(); ++ if (unlikely(sz != (size_t)-1 && sz < n)) ++ copy_to_user_overflow(); ++ else if (access_ok(VERIFY_WRITE, to, n)) ++ n = __copy_to_user(to, from, n); ++ return n; ++} + ++/** ++ * copy_from_user: - Copy a block of data from user space. ++ * @to: Destination address, in kernel space. 
++ * @from: Source address, in user space. ++ * @n: Number of bytes to copy. ++ * ++ * Context: User context only. This function may sleep. ++ * ++ * Copy data from user space to kernel space. ++ * ++ * Returns number of bytes that could not be copied. ++ * On success, this will be zero. ++ * ++ * If some data could not be copied, this function will pad the copied ++ * data to the requested size using zero bytes. ++ */ ++static inline unsigned long __must_check ++copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ size_t sz = __compiletime_object_size(to); ++ ++ check_object_size(to, n, false); ++ ++ if (unlikely(sz != (size_t)-1 && sz < n)) ++ copy_from_user_overflow(); ++ else if (access_ok(VERIFY_READ, from, n)) ++ n = __copy_from_user(to, from, n); ++ else if ((long)n > 0) ++ memset(to, 0, n); + return n; + } + +diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h +index 142810c..1f2a0a7 100644 +--- a/arch/x86/include/asm/uaccess_64.h ++++ b/arch/x86/include/asm/uaccess_64.h +@@ -10,6 +10,9 @@ + #include <asm/alternative.h> + #include <asm/cpufeature.h> + #include <asm/page.h> ++#include <asm/pgtable.h> ++ ++#define set_fs(x) (current_thread_info()->addr_limit = (x)) + + /* + * Copy To/From Userspace +@@ -17,13 +20,13 @@ + + /* Handles exceptions in both to and from, but doesn't do access_ok */ + __must_check unsigned long +-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len); ++copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3); + __must_check unsigned long +-copy_user_generic_string(void *to, const void *from, unsigned len); ++copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3); + __must_check unsigned long +-copy_user_generic_unrolled(void *to, const void *from, unsigned len); ++copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3); + +-static __always_inline __must_check unsigned long ++static __always_inline __must_check __size_overflow(3) unsigned long + copy_user_generic(void *to, const void *from, unsigned len) + { + unsigned ret; +@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len) + ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from), + "=d" (len)), + "1" (to), "2" (from), "3" (len) +- : "memory", "rcx", "r8", "r9", "r10", "r11"); ++ : "memory", "rcx", "r8", "r9", "r11"); + return ret; + } + ++static __always_inline __must_check unsigned long ++__copy_to_user(void __user *to, const void *from, unsigned long len); ++static __always_inline __must_check unsigned long ++__copy_from_user(void *to, const void __user *from, unsigned long len); + __must_check unsigned long +-_copy_to_user(void __user *to, const void *from, unsigned len); +-__must_check unsigned long +-_copy_from_user(void *to, const void __user *from, unsigned len); +-__must_check unsigned long +-copy_in_user(void __user *to, const void __user *from, unsigned len); ++copy_in_user(void __user *to, const void __user *from, unsigned long len); ++ ++extern void copy_to_user_overflow(void) ++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS ++ __compiletime_error("copy_to_user() buffer size is not provably correct") ++#else ++ __compiletime_warning("copy_to_user() buffer size is not provably correct") ++#endif ++; ++ ++extern void copy_from_user_overflow(void) ++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS ++ __compiletime_error("copy_from_user() buffer size is not provably correct") ++#else ++ 
__compiletime_warning("copy_from_user() buffer size is not provably correct") ++#endif ++; + + static inline unsigned long __must_check copy_from_user(void *to, + const void __user *from, + unsigned long n) + { +- int sz = __compiletime_object_size(to); +- + might_fault(); +- if (likely(sz == -1 || sz >= n)) +- n = _copy_from_user(to, from, n); +-#ifdef CONFIG_DEBUG_VM +- else +- WARN(1, "Buffer overflow detected!\n"); +-#endif ++ ++ check_object_size(to, n, false); ++ ++ if (access_ok(VERIFY_READ, from, n)) ++ n = __copy_from_user(to, from, n); ++ else if (n < INT_MAX) ++ memset(to, 0, n); + return n; + } + + static __always_inline __must_check +-int copy_to_user(void __user *dst, const void *src, unsigned size) ++int copy_to_user(void __user *dst, const void *src, unsigned long size) + { + might_fault(); + +- return _copy_to_user(dst, src, size); ++ if (access_ok(VERIFY_WRITE, dst, size)) ++ size = __copy_to_user(dst, src, size); ++ return size; + } + + static __always_inline __must_check +-int __copy_from_user(void *dst, const void __user *src, unsigned size) ++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) + { +- int ret = 0; ++ size_t sz = __compiletime_object_size(dst); ++ unsigned ret = 0; + + might_fault(); ++ ++ if (size > INT_MAX) ++ return size; ++ ++ check_object_size(dst, size, false); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_READ, src, size)) ++ return size; ++#endif ++ ++ if (unlikely(sz != (size_t)-1 && sz < size)) { ++ copy_from_user_overflow(); ++ return size; ++ } ++ + if (!__builtin_constant_p(size)) +- return copy_user_generic(dst, (__force void *)src, size); ++ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size); + switch (size) { +- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src, ++ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src, + ret, "b", "b", "=q", 1); + return ret; +- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src, ++ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src, + ret, "w", "w", "=r", 2); + return ret; +- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src, ++ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src, + ret, "l", "k", "=r", 4); + return ret; +- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src, ++ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src, + ret, "q", "", "=r", 8); + return ret; + case 10: +- __get_user_asm(*(u64 *)dst, (u64 __user *)src, ++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src, + ret, "q", "", "=r", 10); + if (unlikely(ret)) + return ret; + __get_user_asm(*(u16 *)(8 + (char *)dst), +- (u16 __user *)(8 + (char __user *)src), ++ (const u16 __user *)(8 + (const char __user *)src), + ret, "w", "w", "=r", 2); + return ret; + case 16: +- __get_user_asm(*(u64 *)dst, (u64 __user *)src, ++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src, + ret, "q", "", "=r", 16); + if (unlikely(ret)) + return ret; + __get_user_asm(*(u64 *)(8 + (char *)dst), +- (u64 __user *)(8 + (char __user *)src), ++ (const u64 __user *)(8 + (const char __user *)src), + ret, "q", "", "=r", 8); + return ret; + default: +- return copy_user_generic(dst, (__force void *)src, size); ++ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size); + } + } + + static __always_inline __must_check +-int __copy_to_user(void __user *dst, const void *src, unsigned size) ++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) + { +- int ret = 0; ++ size_t sz = __compiletime_object_size(src); 
++ unsigned ret = 0; + + might_fault(); ++ ++ if (size > INT_MAX) ++ return size; ++ ++ check_object_size(src, size, true); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_WRITE, dst, size)) ++ return size; ++#endif ++ ++ if (unlikely(sz != (size_t)-1 && sz < size)) { ++ copy_to_user_overflow(); ++ return size; ++ } ++ + if (!__builtin_constant_p(size)) +- return copy_user_generic((__force void *)dst, src, size); ++ return copy_user_generic((__force_kernel void *)____m(dst), src, size); + switch (size) { +- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst, ++ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst, + ret, "b", "b", "iq", 1); + return ret; +- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst, ++ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst, + ret, "w", "w", "ir", 2); + return ret; +- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst, ++ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst, + ret, "l", "k", "ir", 4); + return ret; +- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, ++ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst, + ret, "q", "", "er", 8); + return ret; + case 10: +- __put_user_asm(*(u64 *)src, (u64 __user *)dst, ++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst, + ret, "q", "", "er", 10); + if (unlikely(ret)) + return ret; + asm("":::"memory"); +- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst, ++ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst, + ret, "w", "w", "ir", 2); + return ret; + case 16: +- __put_user_asm(*(u64 *)src, (u64 __user *)dst, ++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst, + ret, "q", "", "er", 16); + if (unlikely(ret)) + return ret; + asm("":::"memory"); +- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, ++ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst, + ret, "q", "", "er", 8); + return ret; + default: +- return copy_user_generic((__force void *)dst, src, size); ++ return copy_user_generic((__force_kernel void *)____m(dst), src, size); + } + } + + static __always_inline __must_check +-int __copy_in_user(void __user *dst, const void __user *src, unsigned size) ++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) + { +- int ret = 0; ++ unsigned ret = 0; + + might_fault(); ++ ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_READ, src, size)) ++ return size; ++ if (!__access_ok(VERIFY_WRITE, dst, size)) ++ return size; ++#endif ++ + if (!__builtin_constant_p(size)) +- return copy_user_generic((__force void *)dst, +- (__force void *)src, size); ++ return copy_user_generic((__force_kernel void *)____m(dst), ++ (__force_kernel const void *)____m(src), size); + switch (size) { + case 1: { + u8 tmp; +- __get_user_asm(tmp, (u8 __user *)src, ++ __get_user_asm(tmp, (const u8 __user *)src, + ret, "b", "b", "=q", 1); + if (likely(!ret)) + __put_user_asm(tmp, (u8 __user *)dst, +@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) + } + case 2: { + u16 tmp; +- __get_user_asm(tmp, (u16 __user *)src, ++ __get_user_asm(tmp, (const u16 __user *)src, + ret, "w", "w", "=r", 2); + if (likely(!ret)) + __put_user_asm(tmp, (u16 __user *)dst, +@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) + + case 4: { + u32 tmp; +- __get_user_asm(tmp, (u32 __user *)src, ++ __get_user_asm(tmp, (const u32 __user *)src, + ret, "l", "k", "=r", 4); + if (likely(!ret)) + __put_user_asm(tmp, 
(u32 __user *)dst, +@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) + } + case 8: { + u64 tmp; +- __get_user_asm(tmp, (u64 __user *)src, ++ __get_user_asm(tmp, (const u64 __user *)src, + ret, "q", "", "=r", 8); + if (likely(!ret)) + __put_user_asm(tmp, (u64 __user *)dst, +@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) + return ret; + } + default: +- return copy_user_generic((__force void *)dst, +- (__force void *)src, size); ++ return copy_user_generic((__force_kernel void *)____m(dst), ++ (__force_kernel const void *)____m(src), size); + } + } + + static __must_check __always_inline int +-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) ++__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) + { +- return copy_user_generic(dst, (__force const void *)src, size); ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_READ, src, size)) ++ return size; ++#endif ++ ++ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size); + } + +-static __must_check __always_inline int +-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) ++static __must_check __always_inline unsigned long ++__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) + { +- return copy_user_generic((__force void *)dst, src, size); ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_WRITE, dst, size)) ++ return size; ++#endif ++ ++ return copy_user_generic((__force_kernel void *)____m(dst), src, size); + } + +-extern long __copy_user_nocache(void *dst, const void __user *src, +- unsigned size, int zerorest); ++extern unsigned long __copy_user_nocache(void *dst, const void __user *src, ++ unsigned long size, int zerorest) __size_overflow(3); + +-static inline int +-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size) ++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) + { + might_sleep(); ++ ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_READ, src, size)) ++ return size; ++#endif ++ + return __copy_user_nocache(dst, src, size, 1); + } + +-static inline int +-__copy_from_user_inatomic_nocache(void *dst, const void __user *src, +- unsigned size) ++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src, ++ unsigned long size) + { ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_READ, src, size)) ++ return size; ++#endif ++ + return __copy_user_nocache(dst, src, size, 0); + } + +-unsigned long +-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); ++extern unsigned long ++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3); + + #endif /* _ASM_X86_UACCESS_64_H */ +diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h +index 5b238981..77fdd78 100644 +--- a/arch/x86/include/asm/word-at-a-time.h ++++ b/arch/x86/include/asm/word-at-a-time.h +@@ -11,7 +11,7 @@ + * and shift, for example. 
+ */ + struct word_at_a_time { +- const unsigned long one_bits, high_bits; ++ unsigned long one_bits, high_bits; + }; + + #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } +diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h +index d8d9922..bf6cecb 100644 +--- a/arch/x86/include/asm/x86_init.h ++++ b/arch/x86/include/asm/x86_init.h +@@ -129,7 +129,7 @@ struct x86_init_ops { + struct x86_init_timers timers; + struct x86_init_iommu iommu; + struct x86_init_pci pci; +-}; ++} __no_const; + + /** + * struct x86_cpuinit_ops - platform specific cpu hotplug setups +@@ -140,7 +140,7 @@ struct x86_cpuinit_ops { + void (*setup_percpu_clockev)(void); + void (*early_percpu_clock_init)(void); + void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node); +-}; ++} __no_const; + + /** + * struct x86_platform_ops - platform specific runtime functions +@@ -166,7 +166,7 @@ struct x86_platform_ops { + void (*save_sched_clock_state)(void); + void (*restore_sched_clock_state)(void); + void (*apic_post_init)(void); +-}; ++} __no_const; + + struct pci_dev; + struct msi_msg; +@@ -180,7 +180,7 @@ struct x86_msi_ops { + void (*teardown_msi_irqs)(struct pci_dev *dev); + void (*restore_msi_irqs)(struct pci_dev *dev, int irq); + int (*setup_hpet_msi)(unsigned int irq, unsigned int id); +-}; ++} __no_const; + + struct IO_APIC_route_entry; + struct io_apic_irq_attr; +@@ -201,7 +201,7 @@ struct x86_io_apic_ops { + unsigned int destination, int vector, + struct io_apic_irq_attr *attr); + void (*eoi_ioapic_pin)(int apic, int pin, int vector); +-}; ++} __no_const; + + extern struct x86_init_ops x86_init; + extern struct x86_cpuinit_ops x86_cpuinit; +diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h +index 0415cda..b43d877 100644 +--- a/arch/x86/include/asm/xsave.h ++++ b/arch/x86/include/asm/xsave.h +@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf) + return -EFAULT; + + __asm__ __volatile__(ASM_STAC "\n" +- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" ++ "1:" ++ __copyuser_seg ++ ".byte " REX_PREFIX "0x0f,0xae,0x27\n" + "2: " ASM_CLAC "\n" + ".section .fixup,\"ax\"\n" + "3: movl $-1,%[err]\n" +@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf) + static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) + { + int err; +- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf); ++ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf); + u32 lmask = mask; + u32 hmask = mask >> 32; + + __asm__ __volatile__(ASM_STAC "\n" +- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" ++ "1:" ++ __copyuser_seg ++ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n" + "2: " ASM_CLAC "\n" + ".section .fixup,\"ax\"\n" + "3: movl $-1,%[err]\n" +diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h +index bbae024..e1528f9 100644 +--- a/arch/x86/include/uapi/asm/e820.h ++++ b/arch/x86/include/uapi/asm/e820.h +@@ -63,7 +63,7 @@ struct e820map { + #define ISA_START_ADDRESS 0xa0000 + #define ISA_END_ADDRESS 0x100000 + +-#define BIOS_BEGIN 0x000a0000 ++#define BIOS_BEGIN 0x000c0000 + #define BIOS_END 0x00100000 + + #define BIOS_ROM_BASE 0xffe00000 +diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile +index 7bd3bd3..5dac791 100644 +--- a/arch/x86/kernel/Makefile ++++ b/arch/x86/kernel/Makefile +@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o + obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o + obj-$(CONFIG_IRQ_WORK) += irq_work.o + 
obj-y += probe_roms.o +-obj-$(CONFIG_X86_32) += i386_ksyms_32.o ++obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o + obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o + obj-y += syscall_$(BITS).o + obj-$(CONFIG_X86_64) += vsyscall_64.o +diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c +index 230c8ea..f915130 100644 +--- a/arch/x86/kernel/acpi/boot.c ++++ b/arch/x86/kernel/acpi/boot.c +@@ -1361,7 +1361,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) + * If your system is blacklisted here, but you find that acpi=force + * works for you, please contact linux-acpi@vger.kernel.org + */ +-static struct dmi_system_id __initdata acpi_dmi_table[] = { ++static const struct dmi_system_id __initconst acpi_dmi_table[] = { + /* + * Boxes that need ACPI disabled + */ +@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = { + }; + + /* second table for DMI checks that should run after early-quirks */ +-static struct dmi_system_id __initdata acpi_dmi_table_late[] = { ++static const struct dmi_system_id __initconst acpi_dmi_table_late[] = { + /* + * HP laptops which use a DSDT reporting as HP/SB400/10000, + * which includes some code which overrides all temperature +diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c +index 0532f5d..36afc0a 100644 +--- a/arch/x86/kernel/acpi/sleep.c ++++ b/arch/x86/kernel/acpi/sleep.c +@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void) + #else /* CONFIG_64BIT */ + #ifdef CONFIG_SMP + stack_start = (unsigned long)temp_stack + sizeof(temp_stack); ++ ++ pax_open_kernel(); + early_gdt_descr.address = + (unsigned long)get_cpu_gdt_table(smp_processor_id()); ++ pax_close_kernel(); ++ + initial_gs = per_cpu_offset(smp_processor_id()); + #endif + initial_code = (unsigned long)wakeup_long64; +diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S +index 13ab720..95d5442 100644 +--- a/arch/x86/kernel/acpi/wakeup_32.S ++++ b/arch/x86/kernel/acpi/wakeup_32.S +@@ -30,13 +30,11 @@ wakeup_pmode_return: + # and restore the stack ... 
but you need gdt for this to work + movl saved_context_esp, %esp + +- movl %cs:saved_magic, %eax +- cmpl $0x12345678, %eax ++ cmpl $0x12345678, saved_magic + jne bogus_magic + + # jump to place where we left off +- movl saved_eip, %eax +- jmp *%eax ++ jmp *(saved_eip) + + bogus_magic: + jmp bogus_magic +diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c +index ef5ccca..bd83949 100644 +--- a/arch/x86/kernel/alternative.c ++++ b/arch/x86/kernel/alternative.c +@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start, + */ + for (a = start; a < end; a++) { + instr = (u8 *)&a->instr_offset + a->instr_offset; ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++ if (instr < (u8 *)_text || (u8 *)_einittext <= instr) ++ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++#endif ++ + replacement = (u8 *)&a->repl_offset + a->repl_offset; + BUG_ON(a->replacementlen > a->instrlen); + BUG_ON(a->instrlen > sizeof(insnbuf)); +@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end, + for (poff = start; poff < end; poff++) { + u8 *ptr = (u8 *)poff + *poff; + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr) ++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++#endif ++ + if (!*poff || ptr < text || ptr >= text_end) + continue; + /* turn DS segment override prefix into lock prefix */ +- if (*ptr == 0x3e) ++ if (*ktla_ktva(ptr) == 0x3e) + text_poke(ptr, ((unsigned char []){0xf0}), 1); + } + mutex_unlock(&text_mutex); +@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end, + for (poff = start; poff < end; poff++) { + u8 *ptr = (u8 *)poff + *poff; + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr) ++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++#endif ++ + if (!*poff || ptr < text || ptr >= text_end) + continue; + /* turn lock prefix into DS segment override prefix */ +- if (*ptr == 0xf0) ++ if (*ktla_ktva(ptr) == 0xf0) + text_poke(ptr, ((unsigned char []){0x3E}), 1); + } + mutex_unlock(&text_mutex); +@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, + + BUG_ON(p->len > MAX_PATCH_LEN); + /* prep the buffer with the original instructions */ +- memcpy(insnbuf, p->instr, p->len); ++ memcpy(insnbuf, ktla_ktva(p->instr), p->len); + used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf, + (unsigned long)p->instr, p->len); + +@@ -515,7 +534,7 @@ void __init alternative_instructions(void) + if (!uniproc_patched || num_possible_cpus() == 1) + free_init_pages("SMP alternatives", + (unsigned long)__smp_locks, +- (unsigned long)__smp_locks_end); ++ PAGE_ALIGN((unsigned long)__smp_locks_end)); + #endif + + apply_paravirt(__parainstructions, __parainstructions_end); +@@ -535,13 +554,17 @@ void __init alternative_instructions(void) + * instructions. And on the local CPU you need to be protected again NMI or MCE + * handlers seeing an inconsistent instruction while you patch. 
+ */ +-void *__init_or_module text_poke_early(void *addr, const void *opcode, ++void *__kprobes text_poke_early(void *addr, const void *opcode, + size_t len) + { + unsigned long flags; + local_irq_save(flags); +- memcpy(addr, opcode, len); ++ ++ pax_open_kernel(); ++ memcpy(ktla_ktva(addr), opcode, len); + sync_core(); ++ pax_close_kernel(); ++ + local_irq_restore(flags); + /* Could also do a CLFLUSH here to speed up CPU recovery; but + that causes hangs on some VIA CPUs. */ +@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, + */ + void *__kprobes text_poke(void *addr, const void *opcode, size_t len) + { +- unsigned long flags; +- char *vaddr; ++ unsigned char *vaddr = ktla_ktva(addr); + struct page *pages[2]; +- int i; ++ size_t i; + + if (!core_kernel_text((unsigned long)addr)) { +- pages[0] = vmalloc_to_page(addr); +- pages[1] = vmalloc_to_page(addr + PAGE_SIZE); ++ pages[0] = vmalloc_to_page(vaddr); ++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE); + } else { +- pages[0] = virt_to_page(addr); ++ pages[0] = virt_to_page(vaddr); + WARN_ON(!PageReserved(pages[0])); +- pages[1] = virt_to_page(addr + PAGE_SIZE); ++ pages[1] = virt_to_page(vaddr + PAGE_SIZE); + } + BUG_ON(!pages[0]); +- local_irq_save(flags); +- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0])); +- if (pages[1]) +- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1])); +- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0); +- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); +- clear_fixmap(FIX_TEXT_POKE0); +- if (pages[1]) +- clear_fixmap(FIX_TEXT_POKE1); +- local_flush_tlb(); +- sync_core(); +- /* Could also do a CLFLUSH here to speed up CPU recovery; but +- that causes hangs on some VIA CPUs. */ ++ text_poke_early(addr, opcode, len); + for (i = 0; i < len; i++) +- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]); +- local_irq_restore(flags); ++ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]); + return addr; + } + +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 904611b..004dde6 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -189,7 +189,7 @@ int first_system_vector = 0xfe; + /* + * Debug level, exported for io_apic.c + */ +-unsigned int apic_verbosity; ++int apic_verbosity; + + int pic_mode; + +@@ -1955,7 +1955,7 @@ void smp_error_interrupt(struct pt_regs *regs) + apic_write(APIC_ESR, 0); + v1 = apic_read(APIC_ESR); + ack_APIC_irq(); +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + + apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)", + smp_processor_id(), v0 , v1); +diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c +index 00c77cf..2dc6a2d 100644 +--- a/arch/x86/kernel/apic/apic_flat_64.c ++++ b/arch/x86/kernel/apic/apic_flat_64.c +@@ -157,7 +157,7 @@ static int flat_probe(void) + return 1; + } + +-static struct apic apic_flat = { ++static struct apic apic_flat __read_only = { + .name = "flat", + .probe = flat_probe, + .acpi_madt_oem_check = flat_acpi_madt_oem_check, +@@ -271,7 +271,7 @@ static int physflat_probe(void) + return 0; + } + +-static struct apic apic_physflat = { ++static struct apic apic_physflat __read_only = { + + .name = "physical flat", + .probe = physflat_probe, +diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c +index e145f28..2752888 100644 +--- a/arch/x86/kernel/apic/apic_noop.c ++++ b/arch/x86/kernel/apic/apic_noop.c +@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, 
u32 v) + WARN_ON_ONCE(cpu_has_apic && !disable_apic); + } + +-struct apic apic_noop = { ++struct apic apic_noop __read_only = { + .name = "noop", + .probe = noop_probe, + .acpi_madt_oem_check = NULL, +diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c +index d50e364..543bee3 100644 +--- a/arch/x86/kernel/apic/bigsmp_32.c ++++ b/arch/x86/kernel/apic/bigsmp_32.c +@@ -152,7 +152,7 @@ static int probe_bigsmp(void) + return dmi_bigsmp; + } + +-static struct apic apic_bigsmp = { ++static struct apic apic_bigsmp __read_only = { + + .name = "bigsmp", + .probe = probe_bigsmp, +diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c +index 0874799..a7a7892 100644 +--- a/arch/x86/kernel/apic/es7000_32.c ++++ b/arch/x86/kernel/apic/es7000_32.c +@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem, + return ret && es7000_apic_is_cluster(); + } + +-/* We've been warned by a false positive warning.Use __refdata to keep calm. */ +-static struct apic __refdata apic_es7000_cluster = { ++static struct apic apic_es7000_cluster __read_only = { + + .name = "es7000", + .probe = probe_es7000, +@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = { + .x86_32_early_logical_apicid = es7000_early_logical_apicid, + }; + +-static struct apic __refdata apic_es7000 = { ++static struct apic apic_es7000 __read_only = { + + .name = "es7000", + .probe = probe_es7000, +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c +index 9ed796c..e930fe4 100644 +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, + } + EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); + +-void lock_vector_lock(void) ++void lock_vector_lock(void) __acquires(vector_lock) + { + /* Used to the online set of cpus does not change + * during assign_irq_vector. +@@ -1068,7 +1068,7 @@ void lock_vector_lock(void) + raw_spin_lock(&vector_lock); + } + +-void unlock_vector_lock(void) ++void unlock_vector_lock(void) __releases(vector_lock) + { + raw_spin_unlock(&vector_lock); + } +@@ -2362,7 +2362,7 @@ static void ack_apic_edge(struct irq_data *data) + ack_APIC_irq(); + } + +-atomic_t irq_mis_count; ++atomic_unchecked_t irq_mis_count; + + #ifdef CONFIG_GENERIC_PENDING_IRQ + static bool io_apic_level_ack_pending(struct irq_cfg *cfg) +@@ -2503,7 +2503,7 @@ static void ack_apic_level(struct irq_data *data) + * at the cpu. + */ + if (!(v & (1 << (i & 0x1f)))) { +- atomic_inc(&irq_mis_count); ++ atomic_inc_unchecked(&irq_mis_count); + + eoi_ioapic_irq(irq, cfg); + } +diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c +index d661ee9..791fd33 100644 +--- a/arch/x86/kernel/apic/numaq_32.c ++++ b/arch/x86/kernel/apic/numaq_32.c +@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void) + (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); + } + +-/* Use __refdata to keep false positive warning calm. 
*/ +-static struct apic __refdata apic_numaq = { ++static struct apic apic_numaq __read_only = { + + .name = "NUMAQ", + .probe = probe_numaq, +diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c +index eb35ef9..f184a21 100644 +--- a/arch/x86/kernel/apic/probe_32.c ++++ b/arch/x86/kernel/apic/probe_32.c +@@ -72,7 +72,7 @@ static int probe_default(void) + return 1; + } + +-static struct apic apic_default = { ++static struct apic apic_default __read_only = { + + .name = "default", + .probe = probe_default, +diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c +index 77c95c0..434f8a4 100644 +--- a/arch/x86/kernel/apic/summit_32.c ++++ b/arch/x86/kernel/apic/summit_32.c +@@ -486,7 +486,7 @@ void setup_summit(void) + } + #endif + +-static struct apic apic_summit = { ++static struct apic apic_summit __read_only = { + + .name = "summit", + .probe = probe_summit, +diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c +index c88baa4..757aee1 100644 +--- a/arch/x86/kernel/apic/x2apic_cluster.c ++++ b/arch/x86/kernel/apic/x2apic_cluster.c +@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) + return notifier_from_errno(err); + } + +-static struct notifier_block __refdata x2apic_cpu_notifier = { ++static struct notifier_block x2apic_cpu_notifier = { + .notifier_call = update_clusterinfo, + }; + +@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask, + cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu)); + } + +-static struct apic apic_x2apic_cluster = { ++static struct apic apic_x2apic_cluster __read_only = { + + .name = "cluster x2apic", + .probe = x2apic_cluster_probe, +diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c +index 562a76d..a003c0f 100644 +--- a/arch/x86/kernel/apic/x2apic_phys.c ++++ b/arch/x86/kernel/apic/x2apic_phys.c +@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void) + return apic == &apic_x2apic_phys; + } + +-static struct apic apic_x2apic_phys = { ++static struct apic apic_x2apic_phys __read_only = { + + .name = "physical x2apic", + .probe = x2apic_phys_probe, +diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c +index 794f6eb..67e1db2 100644 +--- a/arch/x86/kernel/apic/x2apic_uv_x.c ++++ b/arch/x86/kernel/apic/x2apic_uv_x.c +@@ -342,7 +342,7 @@ static int uv_probe(void) + return apic == &apic_x2apic_uv_x; + } + +-static struct apic __refdata apic_x2apic_uv_x = { ++static struct apic apic_x2apic_uv_x __read_only = { + + .name = "UV large system", + .probe = uv_probe, +diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c +index 66b5faf..3442423 100644 +--- a/arch/x86/kernel/apm_32.c ++++ b/arch/x86/kernel/apm_32.c +@@ -434,7 +434,7 @@ static DEFINE_MUTEX(apm_mutex); + * This is for buggy BIOS's that refer to (real mode) segment 0x40 + * even though they are called in protected mode. 
+ */ +-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, ++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, + (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); + + static const char driver_version[] = "1.16ac"; /* no spaces */ +@@ -612,7 +612,10 @@ static long __apm_bios_call(void *_call) + BUG_ON(cpu != 0); + gdt = get_cpu_gdt_table(cpu); + save_desc_40 = gdt[0x40 / 8]; ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = bad_bios_desc; ++ pax_close_kernel(); + + apm_irq_save(flags); + APM_DO_SAVE_SEGS; +@@ -621,7 +624,11 @@ static long __apm_bios_call(void *_call) + &call->esi); + APM_DO_RESTORE_SEGS; + apm_irq_restore(flags); ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = save_desc_40; ++ pax_close_kernel(); ++ + put_cpu(); + + return call->eax & 0xff; +@@ -688,7 +695,10 @@ static long __apm_bios_call_simple(void *_call) + BUG_ON(cpu != 0); + gdt = get_cpu_gdt_table(cpu); + save_desc_40 = gdt[0x40 / 8]; ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = bad_bios_desc; ++ pax_close_kernel(); + + apm_irq_save(flags); + APM_DO_SAVE_SEGS; +@@ -696,7 +706,11 @@ static long __apm_bios_call_simple(void *_call) + &call->eax); + APM_DO_RESTORE_SEGS; + apm_irq_restore(flags); ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = save_desc_40; ++ pax_close_kernel(); ++ + put_cpu(); + return error; + } +@@ -2363,12 +2377,15 @@ static int __init apm_init(void) + * code to that CPU. + */ + gdt = get_cpu_gdt_table(0); ++ ++ pax_open_kernel(); + set_desc_base(&gdt[APM_CS >> 3], + (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); + set_desc_base(&gdt[APM_CS_16 >> 3], + (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); + set_desc_base(&gdt[APM_DS >> 3], + (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); ++ pax_close_kernel(); + + proc_create("apm", 0, NULL, &apm_file_ops); + +diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c +index 2861082..6d4718e 100644 +--- a/arch/x86/kernel/asm-offsets.c ++++ b/arch/x86/kernel/asm-offsets.c +@@ -33,6 +33,8 @@ void common(void) { + OFFSET(TI_status, thread_info, status); + OFFSET(TI_addr_limit, thread_info, addr_limit); + OFFSET(TI_preempt_count, thread_info, preempt_count); ++ OFFSET(TI_lowest_stack, thread_info, lowest_stack); ++ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo)); + + BLANK(); + OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); +@@ -53,8 +55,26 @@ void common(void) { + OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); + OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0); ++#endif ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3); ++ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3); ++#ifdef CONFIG_X86_64 ++ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched); ++#endif + #endif + ++#endif ++ ++ BLANK(); ++ DEFINE(PAGE_SIZE_asm, PAGE_SIZE); ++ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT); ++ DEFINE(THREAD_SIZE_asm, THREAD_SIZE); ++ + #ifdef CONFIG_XEN + BLANK(); + OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); +diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c +index 1b4754f..fbb4227 100644 +--- a/arch/x86/kernel/asm-offsets_64.c ++++ b/arch/x86/kernel/asm-offsets_64.c +@@ -76,6 +76,7 @@ int main(void) + BLANK(); + #undef ENTRY + ++ DEFINE(TSS_size, sizeof(struct tss_struct)); + OFFSET(TSS_ist, 
tss_struct, x86_tss.ist); + BLANK(); + +diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile +index a0e067d..9c7db16 100644 +--- a/arch/x86/kernel/cpu/Makefile ++++ b/arch/x86/kernel/cpu/Makefile +@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg + CFLAGS_REMOVE_perf_event.o = -pg + endif + +-# Make sure load_percpu_segment has no stackprotector +-nostackp := $(call cc-option, -fno-stack-protector) +-CFLAGS_common.o := $(nostackp) +- + obj-y := intel_cacheinfo.o scattered.o topology.o + obj-y += proc.o capflags.o powerflags.o common.o + obj-y += vmware.o hypervisor.o mshyperv.o +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index fa96eb0..03efe73 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -737,7 +737,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, + unsigned int size) + { + /* AMD errata T13 (order #21922) */ +- if ((c->x86 == 6)) { ++ if (c->x86 == 6) { + /* Duron Rev A0 */ + if (c->x86_model == 3 && c->x86_mask == 0) + size = 64; +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index d814772..c615653 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = { + + static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; + +-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { +-#ifdef CONFIG_X86_64 +- /* +- * We need valid kernel segments for data and code in long mode too +- * IRET will check the segment types kkeil 2000/10/28 +- * Also sysret mandates a special GDT layout +- * +- * TLS descriptors are currently at a different place compared to i386. +- * Hopefully nobody expects them at a fixed place (Wine?) +- */ +- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), +- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), +- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), +-#else +- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), +- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), +- /* +- * Segments used for calling PnP BIOS have byte granularity. +- * They code segments and data segments have fixed 64k limits, +- * the transfer segment sizes are set at run time. +- */ +- /* 32-bit code */ +- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), +- /* 16-bit code */ +- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), +- /* 16-bit data */ +- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), +- /* 16-bit data */ +- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), +- /* 16-bit data */ +- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), +- /* +- * The APM segments have byte granularity and their bases +- * are set at run time. All have 64k limits. 
+- */ +- /* 32-bit code */ +- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), +- /* 16-bit code */ +- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), +- /* data */ +- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), +- +- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), +- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), +- GDT_STACK_CANARY_INIT +-#endif +-} }; +-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); +- + static int __init x86_xsave_setup(char *s) + { + setup_clear_cpu_cap(X86_FEATURE_XSAVE); +@@ -386,7 +332,7 @@ void switch_to_new_gdt(int cpu) + { + struct desc_ptr gdt_descr; + +- gdt_descr.address = (long)get_cpu_gdt_table(cpu); ++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); + gdt_descr.size = GDT_SIZE - 1; + load_gdt(&gdt_descr); + /* Reload the per-cpu base */ +@@ -882,6 +828,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) + /* Filter out anything that depends on CPUID levels we don't have */ + filter_cpuid_features(c, true); + ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)) ++ setup_clear_cpu_cap(X86_FEATURE_SEP); ++#endif ++ + /* If the model name is still unset, do table lookup. */ + if (!c->x86_model_id[0]) { + const char *p; +@@ -1065,10 +1015,12 @@ static __init int setup_disablecpuid(char *arg) + } + __setup("clearcpuid=", setup_disablecpuid); + ++DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo; ++EXPORT_PER_CPU_SYMBOL(current_tinfo); ++ + #ifdef CONFIG_X86_64 + struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; +-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, +- (unsigned long) nmi_idt_table }; ++struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table }; + + DEFINE_PER_CPU_FIRST(union irq_stack_union, + irq_stack_union) __aligned(PAGE_SIZE); +@@ -1082,7 +1034,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = + EXPORT_PER_CPU_SYMBOL(current_task); + + DEFINE_PER_CPU(unsigned long, kernel_stack) = +- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; ++ (unsigned long)&init_thread_union - 16 + THREAD_SIZE; + EXPORT_PER_CPU_SYMBOL(kernel_stack); + + DEFINE_PER_CPU(char *, irq_stack_ptr) = +@@ -1227,7 +1179,7 @@ void __cpuinit cpu_init(void) + load_ucode_ap(); + + cpu = stack_smp_processor_id(); +- t = &per_cpu(init_tss, cpu); ++ t = init_tss + cpu; + oist = &per_cpu(orig_ist, cpu); + + #ifdef CONFIG_NUMA +@@ -1253,7 +1205,7 @@ void __cpuinit cpu_init(void) + switch_to_new_gdt(cpu); + loadsegment(fs, 0); + +- load_idt((const struct desc_ptr *)&idt_descr); ++ load_idt(&idt_descr); + + memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); + syscall_init(); +@@ -1262,7 +1214,6 @@ void __cpuinit cpu_init(void) + wrmsrl(MSR_KERNEL_GS_BASE, 0); + barrier(); + +- x86_configure_nx(); + enable_x2apic(); + + /* +@@ -1314,7 +1265,7 @@ void __cpuinit cpu_init(void) + { + int cpu = smp_processor_id(); + struct task_struct *curr = current; +- struct tss_struct *t = &per_cpu(init_tss, cpu); ++ struct tss_struct *t = init_tss + cpu; + struct thread_struct *thread = &curr->thread; + + show_ucode_info_early(); +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c +index 1905ce9..a7ac587 100644 +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@ -173,7 +173,7 @@ static void __cpuinit trap_init_f00f_bug(void) + * Update the IDT descriptor and 
reload the IDT so that + * it uses the read-only mapped virtual address. + */ +- idt_descr.address = fix_to_virt(FIX_F00F_IDT); ++ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT); + load_idt(&idt_descr); + } + #endif +diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c +index 7c6f7d5..8cac382 100644 +--- a/arch/x86/kernel/cpu/intel_cacheinfo.c ++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c +@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = { + }; + + #ifdef CONFIG_AMD_NB ++static struct attribute *default_attrs_amd_nb[] = { ++ &type.attr, ++ &level.attr, ++ &coherency_line_size.attr, ++ &physical_line_partition.attr, ++ &ways_of_associativity.attr, ++ &number_of_sets.attr, ++ &size.attr, ++ &shared_cpu_map.attr, ++ &shared_cpu_list.attr, ++ NULL, ++ NULL, ++ NULL, ++ NULL ++}; ++ + static struct attribute ** __cpuinit amd_l3_attrs(void) + { + static struct attribute **attrs; +@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void) + + n = ARRAY_SIZE(default_attrs); + +- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) +- n += 2; +- +- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) +- n += 1; +- +- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL); +- if (attrs == NULL) +- return attrs = default_attrs; +- +- for (n = 0; default_attrs[n]; n++) +- attrs[n] = default_attrs[n]; ++ attrs = default_attrs_amd_nb; + + if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) { + attrs[n++] = &cache_disable_0.attr; +@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = { + .default_attrs = default_attrs, + }; + ++#ifdef CONFIG_AMD_NB ++static struct kobj_type ktype_cache_amd_nb = { ++ .sysfs_ops = &sysfs_ops, ++ .default_attrs = default_attrs_amd_nb, ++}; ++#endif ++ + static struct kobj_type ktype_percpu_entry = { + .sysfs_ops = &sysfs_ops, + }; +@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev) + return retval; + } + ++#ifdef CONFIG_AMD_NB ++ amd_l3_attrs(); ++#endif ++ + for (i = 0; i < num_cache_leaves; i++) { ++ struct kobj_type *ktype; ++ + this_object = INDEX_KOBJECT_PTR(cpu, i); + this_object->cpu = cpu; + this_object->index = i; + + this_leaf = CPUID4_INFO_IDX(cpu, i); + +- ktype_cache.default_attrs = default_attrs; ++ ktype = &ktype_cache; + #ifdef CONFIG_AMD_NB + if (this_leaf->base.nb) +- ktype_cache.default_attrs = amd_l3_attrs(); ++ ktype = &ktype_cache_amd_nb; + #endif + retval = kobject_init_and_add(&(this_object->kobj), +- &ktype_cache, ++ ktype, + per_cpu(ici_cache_kobject, cpu), + "index%1lu", i); + if (unlikely(retval)) { +@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, + return NOTIFY_OK; + } + +-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { ++static struct notifier_block cacheinfo_cpu_notifier = { + .notifier_call = cacheinfo_cpu_callback, + }; + +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c +index 7bc1263..bff5686 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce.c ++++ b/arch/x86/kernel/cpu/mcheck/mce.c +@@ -45,6 +45,7 @@ + #include <asm/processor.h> + #include <asm/mce.h> + #include <asm/msr.h> ++#include <asm/local.h> + + #include "mce-internal.h" + +@@ -246,7 +247,7 @@ static void print_mce(struct mce *m) + !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" 
: "", + m->cs, m->ip); + +- if (m->cs == __KERNEL_CS) ++ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS) + print_symbol("{%s}", m->ip); + pr_cont("\n"); + } +@@ -279,10 +280,10 @@ static void print_mce(struct mce *m) + + #define PANIC_TIMEOUT 5 /* 5 seconds */ + +-static atomic_t mce_paniced; ++static atomic_unchecked_t mce_paniced; + + static int fake_panic; +-static atomic_t mce_fake_paniced; ++static atomic_unchecked_t mce_fake_paniced; + + /* Panic in progress. Enable interrupts and wait for final IPI */ + static void wait_for_panic(void) +@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) + /* + * Make sure only one CPU runs in machine check panic + */ +- if (atomic_inc_return(&mce_paniced) > 1) ++ if (atomic_inc_return_unchecked(&mce_paniced) > 1) + wait_for_panic(); + barrier(); + +@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) + console_verbose(); + } else { + /* Don't log too much for fake panic */ +- if (atomic_inc_return(&mce_fake_paniced) > 1) ++ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1) + return; + } + /* First print corrected ones that are still unlogged */ +@@ -353,7 +354,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) + if (!fake_panic) { + if (panic_timeout == 0) + panic_timeout = mca_cfg.panic_timeout; +- panic(msg); ++ panic("%s", msg); + } else + pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg); + } +@@ -683,7 +684,7 @@ static int mce_timed_out(u64 *t) + * might have been modified by someone else. + */ + rmb(); +- if (atomic_read(&mce_paniced)) ++ if (atomic_read_unchecked(&mce_paniced)) + wait_for_panic(); + if (!mca_cfg.monarch_timeout) + goto out; +@@ -1654,7 +1655,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code) + } + + /* Call the installed machine check handler for this CPU setup. */ +-void (*machine_check_vector)(struct pt_regs *, long error_code) = ++void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only = + unexpected_machine_check; + + /* +@@ -1677,7 +1678,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) + return; + } + ++ pax_open_kernel(); + machine_check_vector = do_machine_check; ++ pax_close_kernel(); + + __mcheck_cpu_init_generic(); + __mcheck_cpu_init_vendor(c); +@@ -1691,7 +1694,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) + */ + + static DEFINE_SPINLOCK(mce_chrdev_state_lock); +-static int mce_chrdev_open_count; /* #times opened */ ++static local_t mce_chrdev_open_count; /* #times opened */ + static int mce_chrdev_open_exclu; /* already open exclusive? 
*/ + + static int mce_chrdev_open(struct inode *inode, struct file *file) +@@ -1699,7 +1702,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file) + spin_lock(&mce_chrdev_state_lock); + + if (mce_chrdev_open_exclu || +- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) { ++ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) { + spin_unlock(&mce_chrdev_state_lock); + + return -EBUSY; +@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file) + + if (file->f_flags & O_EXCL) + mce_chrdev_open_exclu = 1; +- mce_chrdev_open_count++; ++ local_inc(&mce_chrdev_open_count); + + spin_unlock(&mce_chrdev_state_lock); + +@@ -1718,7 +1721,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file) + { + spin_lock(&mce_chrdev_state_lock); + +- mce_chrdev_open_count--; ++ local_dec(&mce_chrdev_open_count); + mce_chrdev_open_exclu = 0; + + spin_unlock(&mce_chrdev_state_lock); +@@ -2364,7 +2367,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) + return NOTIFY_OK; + } + +-static struct notifier_block mce_cpu_notifier __cpuinitdata = { ++static struct notifier_block mce_cpu_notifier = { + .notifier_call = mce_cpu_callback, + }; + +@@ -2374,7 +2377,7 @@ static __init void mce_init_banks(void) + + for (i = 0; i < mca_cfg.banks; i++) { + struct mce_bank *b = &mce_banks[i]; +- struct device_attribute *a = &b->attr; ++ device_attribute_no_const *a = &b->attr; + + sysfs_attr_init(&a->attr); + a->attr.name = b->attrname; +@@ -2442,7 +2445,7 @@ struct dentry *mce_get_debugfs_dir(void) + static void mce_reset(void) + { + cpu_missing = 0; +- atomic_set(&mce_fake_paniced, 0); ++ atomic_set_unchecked(&mce_fake_paniced, 0); + atomic_set(&mce_executing, 0); + atomic_set(&mce_callin, 0); + atomic_set(&global_nwo, 0); +diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c +index 1c044b1..37a2a43 100644 +--- a/arch/x86/kernel/cpu/mcheck/p5.c ++++ b/arch/x86/kernel/cpu/mcheck/p5.c +@@ -11,6 +11,7 @@ + #include <asm/processor.h> + #include <asm/mce.h> + #include <asm/msr.h> ++#include <asm/pgtable.h> + + /* By default disabled */ + int mce_p5_enabled __read_mostly; +@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c) + if (!cpu_has(c, X86_FEATURE_MCE)) + return; + ++ pax_open_kernel(); + machine_check_vector = pentium_machine_check; ++ pax_close_kernel(); + /* Make sure the vector pointer is visible before we enable MCEs: */ + wmb(); + +diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c +index 47a1870..8c019a7 100644 +--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c ++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c +@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb, + return notifier_from_errno(err); + } + +-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = ++static struct notifier_block thermal_throttle_cpu_notifier = + { + .notifier_call = thermal_throttle_cpu_callback, + }; +diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c +index e9a701a..35317d6 100644 +--- a/arch/x86/kernel/cpu/mcheck/winchip.c ++++ b/arch/x86/kernel/cpu/mcheck/winchip.c +@@ -10,6 +10,7 @@ + #include <asm/processor.h> + #include <asm/mce.h> + #include <asm/msr.h> ++#include <asm/pgtable.h> + + /* Machine check handler for WinChip C6: */ + static void winchip_machine_check(struct pt_regs *regs, long error_code) +@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 
*c) + { + u32 lo, hi; + ++ pax_open_kernel(); + machine_check_vector = winchip_machine_check; ++ pax_close_kernel(); + /* Make sure the vector pointer is visible before we enable MCEs: */ + wmb(); + +diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c +index 726bf96..81f0526 100644 +--- a/arch/x86/kernel/cpu/mtrr/main.c ++++ b/arch/x86/kernel/cpu/mtrr/main.c +@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex); + u64 size_or_mask, size_and_mask; + static bool mtrr_aps_delayed_init; + +-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM]; ++static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only; + + const struct mtrr_ops *mtrr_if; + +diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h +index df5e41f..816c719 100644 +--- a/arch/x86/kernel/cpu/mtrr/mtrr.h ++++ b/arch/x86/kernel/cpu/mtrr/mtrr.h +@@ -25,7 +25,7 @@ struct mtrr_ops { + int (*validate_add_page)(unsigned long base, unsigned long size, + unsigned int type); + int (*have_wrcomb)(void); +-}; ++} __do_const; + + extern int generic_get_free_region(unsigned long base, unsigned long size, + int replace_reg); +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c +index bf0f01a..9adfee1 100644 +--- a/arch/x86/kernel/cpu/perf_event.c ++++ b/arch/x86/kernel/cpu/perf_event.c +@@ -1305,7 +1305,7 @@ static void __init pmu_check_apic(void) + pr_info("no hardware sampling interrupt available.\n"); + } + +-static struct attribute_group x86_pmu_format_group = { ++static attribute_group_no_const x86_pmu_format_group = { + .name = "format", + .attrs = NULL, + }; +@@ -1374,7 +1374,7 @@ static struct attribute *events_attr[] = { + NULL, + }; + +-static struct attribute_group x86_pmu_events_group = { ++static attribute_group_no_const x86_pmu_events_group = { + .name = "events", + .attrs = events_attr, + }; +@@ -1873,7 +1873,7 @@ static unsigned long get_segment_base(unsigned int segment) + if (idx > GDT_ENTRIES) + return 0; + +- desc = __this_cpu_ptr(&gdt_page.gdt[0]); ++ desc = get_cpu_gdt_table(smp_processor_id()); + } + + return get_desc_base(desc + idx); +@@ -1963,7 +1963,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) + break; + + perf_callchain_store(entry, frame.return_address); +- fp = frame.next_frame; ++ fp = (const void __force_user *)frame.next_frame; + } + } + +diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c +index 4a0a462..be3b204 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel.c ++++ b/arch/x86/kernel/cpu/perf_event_intel.c +@@ -1994,10 +1994,10 @@ __init int intel_pmu_init(void) + * v2 and above have a perf capabilities MSR + */ + if (version > 1) { +- u64 capabilities; ++ u64 capabilities = x86_pmu.intel_cap.capabilities; + +- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities); +- x86_pmu.intel_cap.capabilities = capabilities; ++ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities)) ++ x86_pmu.intel_cap.capabilities = capabilities; + } + + intel_ds_init(); +diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c +index 3e091f0..d2dc8d6 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c ++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c +@@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types) + static int __init uncore_type_init(struct intel_uncore_type *type) + { + struct intel_uncore_pmu *pmus; +- struct attribute_group *attr_group; ++ 
attribute_group_no_const *attr_group; + struct attribute **attrs; + int i, j; + +@@ -2826,7 +2826,7 @@ static int + return NOTIFY_OK; + } + +-static struct notifier_block uncore_cpu_nb __cpuinitdata = { ++static struct notifier_block uncore_cpu_nb = { + .notifier_call = uncore_cpu_notifier, + /* + * to migrate uncore events, our notifier should be executed +diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h +index e68a455..975a932 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h ++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h +@@ -428,7 +428,7 @@ struct intel_uncore_box { + struct uncore_event_desc { + struct kobj_attribute attr; + const char *config; +-}; ++} __do_const; + + #define INTEL_UNCORE_EVENT_DESC(_name, _config) \ + { \ +diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c +index 1e4dbcf..b9a34c2 100644 +--- a/arch/x86/kernel/cpuid.c ++++ b/arch/x86/kernel/cpuid.c +@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb, + return notifier_from_errno(err); + } + +-static struct notifier_block __refdata cpuid_class_cpu_notifier = ++static struct notifier_block cpuid_class_cpu_notifier = + { + .notifier_call = cpuid_class_cpu_callback, + }; +diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c +index 74467fe..18793d5 100644 +--- a/arch/x86/kernel/crash.c ++++ b/arch/x86/kernel/crash.c +@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs) + { + #ifdef CONFIG_X86_32 + struct pt_regs fixed_regs; +-#endif + +-#ifdef CONFIG_X86_32 +- if (!user_mode_vm(regs)) { ++ if (!user_mode(regs)) { + crash_fixup_ss_esp(&fixed_regs, regs); + regs = &fixed_regs; + } +diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c +index afa64ad..dce67dd 100644 +--- a/arch/x86/kernel/crash_dump_64.c ++++ b/arch/x86/kernel/crash_dump_64.c +@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + return -ENOMEM; + + if (userbuf) { +- if (copy_to_user(buf, vaddr + offset, csize)) { ++ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) { + iounmap(vaddr); + return -EFAULT; + } +diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c +index 37250fe..bf2ec74 100644 +--- a/arch/x86/kernel/doublefault_32.c ++++ b/arch/x86/kernel/doublefault_32.c +@@ -11,7 +11,7 @@ + + #define DOUBLEFAULT_STACKSIZE (1024) + static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; +-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) ++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2) + + #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM) + +@@ -21,7 +21,7 @@ static void doublefault_fn(void) + unsigned long gdt, tss; + + store_gdt(&gdt_desc); +- gdt = gdt_desc.address; ++ gdt = (unsigned long)gdt_desc.address; + + printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); + +@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = { + /* 0x2 bit is always set */ + .flags = X86_EFLAGS_SF | 0x2, + .sp = STACK_START, +- .es = __USER_DS, ++ .es = __KERNEL_DS, + .cs = __KERNEL_CS, + .ss = __KERNEL_DS, +- .ds = __USER_DS, ++ .ds = __KERNEL_DS, + .fs = __KERNEL_PERCPU, + + .__cr3 = __pa_nodebug(swapper_pg_dir), +diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c +index c8797d5..c605e53 100644 +--- a/arch/x86/kernel/dumpstack.c ++++ b/arch/x86/kernel/dumpstack.c +@@ 
-2,6 +2,9 @@ + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs + */ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++#define __INCLUDED_BY_HIDESYM 1 ++#endif + #include <linux/kallsyms.h> + #include <linux/kprobes.h> + #include <linux/uaccess.h> +@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable) + static void + print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, +- struct thread_info *tinfo, int *graph) ++ struct task_struct *task, int *graph) + { +- struct task_struct *task; + unsigned long ret_addr; + int index; + + if (addr != (unsigned long)return_to_handler) + return; + +- task = tinfo->task; + index = task->curr_ret_stack; + + if (!task->ret_stack || index < *graph) +@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data, + static inline void + print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, +- struct thread_info *tinfo, int *graph) ++ struct task_struct *task, int *graph) + { } + #endif + +@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data, + * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack + */ + +-static inline int valid_stack_ptr(struct thread_info *tinfo, +- void *p, unsigned int size, void *end) ++static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end) + { +- void *t = tinfo; + if (end) { + if (p < end && p >= (end-THREAD_SIZE)) + return 1; +@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, + } + + unsigned long +-print_context_stack(struct thread_info *tinfo, ++print_context_stack(struct task_struct *task, void *stack_start, + unsigned long *stack, unsigned long bp, + const struct stacktrace_ops *ops, void *data, + unsigned long *end, int *graph) + { + struct stack_frame *frame = (struct stack_frame *)bp; + +- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { ++ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) { + unsigned long addr; + + addr = *stack; +@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo, + } else { + ops->address(data, addr, 0); + } +- print_ftrace_graph_addr(addr, data, ops, tinfo, graph); ++ print_ftrace_graph_addr(addr, data, ops, task, graph); + } + stack++; + } +@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo, + EXPORT_SYMBOL_GPL(print_context_stack); + + unsigned long +-print_context_stack_bp(struct thread_info *tinfo, ++print_context_stack_bp(struct task_struct *task, void *stack_start, + unsigned long *stack, unsigned long bp, + const struct stacktrace_ops *ops, void *data, + unsigned long *end, int *graph) +@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo, + struct stack_frame *frame = (struct stack_frame *)bp; + unsigned long *ret_addr = &frame->return_address; + +- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) { ++ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) { + unsigned long addr = *ret_addr; + + if (!__kernel_text_address(addr)) +@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo, + ops->address(data, addr, 1); + frame = frame->next_frame; + ret_addr = &frame->return_address; +- print_ftrace_graph_addr(addr, data, ops, tinfo, graph); ++ print_ftrace_graph_addr(addr, data, ops, task, graph); + } + + return (unsigned long)frame; +@@ -189,7 +188,7 @@ void dump_stack(void) + + bp = stack_frame(current, NULL); + printk("Pid: %d, comm: 
%.20s %s %s %.*s\n", +- current->pid, current->comm, print_tainted(), ++ task_pid_nr(current), current->comm, print_tainted(), + init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version); +@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void) + } + EXPORT_SYMBOL_GPL(oops_begin); + ++extern void gr_handle_kernel_exploit(void); ++ + void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) + { + if (regs && kexec_should_crash(current)) +@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); +- do_exit(signr); ++ ++ gr_handle_kernel_exploit(); ++ ++ do_group_exit(signr); + } + + int __kprobes __die(const char *str, struct pt_regs *regs, long err) +@@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) + print_modules(); + show_regs(regs); + #ifdef CONFIG_X86_32 +- if (user_mode_vm(regs)) { ++ if (user_mode(regs)) { + sp = regs->sp; + ss = regs->ss & 0xffff; + } else { +@@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err) + unsigned long flags = oops_begin(); + int sig = SIGSEGV; + +- if (!user_mode_vm(regs)) ++ if (!user_mode(regs)) + report_bug(regs->ip, regs); + + if (__die(str, regs, err)) +diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c +index 1038a41..db2c12b 100644 +--- a/arch/x86/kernel/dumpstack_32.c ++++ b/arch/x86/kernel/dumpstack_32.c +@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + bp = stack_frame(task, regs); + + for (;;) { +- struct thread_info *context; ++ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1)); + +- context = (struct thread_info *) +- ((unsigned long)stack & (~(THREAD_SIZE - 1))); +- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph); ++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph); + +- stack = (unsigned long *)context->previous_esp; +- if (!stack) ++ if (stack_start == task_stack_page(task)) + break; ++ stack = *(unsigned long **)stack_start; + if (ops->stack(data, "IRQ") < 0) + break; + touch_nmi_watchdog(); +@@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs) + { + int i; + +- __show_regs(regs, !user_mode_vm(regs)); ++ __show_regs(regs, !user_mode(regs)); + + pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n", + TASK_COMM_LEN, current->comm, task_pid_nr(current), +@@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs) + * When in-kernel, we also print out the stack and code at the + * time of the fault.. 
+ */ +- if (!user_mode_vm(regs)) { ++ if (!user_mode(regs)) { + unsigned int code_prologue = code_bytes * 43 / 64; + unsigned int code_len = code_bytes; + unsigned char c; + u8 *ip; ++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]); + + pr_emerg("Stack:\n"); + show_stack_log_lvl(NULL, regs, ®s->sp, 0, KERN_EMERG); + + pr_emerg("Code:"); + +- ip = (u8 *)regs->ip - code_prologue; ++ ip = (u8 *)regs->ip - code_prologue + cs_base; + if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { + /* try starting at IP */ +- ip = (u8 *)regs->ip; ++ ip = (u8 *)regs->ip + cs_base; + code_len = code_len - code_prologue + 1; + } + for (i = 0; i < code_len; i++, ip++) { +@@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs) + pr_cont(" Bad EIP value."); + break; + } +- if (ip == (u8 *)regs->ip) ++ if (ip == (u8 *)regs->ip + cs_base) + pr_cont(" <%02x>", c); + else + pr_cont(" %02x", c); +@@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip) + { + unsigned short ud2; + ++ ip = ktla_ktva(ip); + if (ip < PAGE_OFFSET) + return 0; + if (probe_kernel_address((unsigned short *)ip, ud2)) +@@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip) + + return ud2 == 0x0b0f; + } ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++void pax_check_alloca(unsigned long size) ++{ ++ unsigned long sp = (unsigned long)&sp, stack_left; ++ ++ /* all kernel stacks are of the same size */ ++ stack_left = sp & (THREAD_SIZE - 1); ++ BUG_ON(stack_left < 256 || size >= stack_left - 256); ++} ++EXPORT_SYMBOL(pax_check_alloca); ++#endif +diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c +index b653675..51cc8c0 100644 +--- a/arch/x86/kernel/dumpstack_64.c ++++ b/arch/x86/kernel/dumpstack_64.c +@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + unsigned long *irq_stack_end = + (unsigned long *)per_cpu(irq_stack_ptr, cpu); + unsigned used = 0; +- struct thread_info *tinfo; + int graph = 0; + unsigned long dummy; ++ void *stack_start; + + if (!task) + task = current; +@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + * current stack address. 
If the stacks consist of nested + * exceptions + */ +- tinfo = task_thread_info(task); + for (;;) { + char *id; + unsigned long *estack_end; ++ + estack_end = in_exception_stack(cpu, (unsigned long)stack, + &used, &id); + +@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + if (ops->stack(data, id) < 0) + break; + +- bp = ops->walk_stack(tinfo, stack, bp, ops, ++ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops, + data, estack_end, &graph); + ops->stack(data, "<EOE>"); + /* +@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + * second-to-last pointer (index -2 to end) in the + * exception stack: + */ ++ if ((u16)estack_end[-1] != __KERNEL_DS) ++ goto out; + stack = (unsigned long *) estack_end[-2]; + continue; + } +@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + if (in_irq_stack(stack, irq_stack, irq_stack_end)) { + if (ops->stack(data, "IRQ") < 0) + break; +- bp = ops->walk_stack(tinfo, stack, bp, ++ bp = ops->walk_stack(task, irq_stack, stack, bp, + ops, data, irq_stack_end, &graph); + /* + * We link to the next stack (which would be +@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + /* + * This handles the process stack: + */ +- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); ++ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1)); ++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph); ++out: + put_cpu(); + } + EXPORT_SYMBOL(dump_trace); +@@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs) + { + int i; + unsigned long sp; +- const int cpu = smp_processor_id(); ++ const int cpu = raw_smp_processor_id(); + struct task_struct *cur = current; + + sp = regs->sp; +@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip) + + return ud2 == 0x0b0f; + } ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++void pax_check_alloca(unsigned long size) ++{ ++ unsigned long sp = (unsigned long)&sp, stack_start, stack_end; ++ unsigned cpu, used; ++ char *id; ++ ++ /* check the process stack first */ ++ stack_start = (unsigned long)task_stack_page(current); ++ stack_end = stack_start + THREAD_SIZE; ++ if (likely(stack_start <= sp && sp < stack_end)) { ++ unsigned long stack_left = sp & (THREAD_SIZE - 1); ++ BUG_ON(stack_left < 256 || size >= stack_left - 256); ++ return; ++ } ++ ++ cpu = get_cpu(); ++ ++ /* check the irq stacks */ ++ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu); ++ stack_start = stack_end - IRQ_STACK_SIZE; ++ if (stack_start <= sp && sp < stack_end) { ++ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1); ++ put_cpu(); ++ BUG_ON(stack_left < 256 || size >= stack_left - 256); ++ return; ++ } ++ ++ /* check the exception stacks */ ++ used = 0; ++ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id); ++ stack_start = stack_end - EXCEPTION_STKSZ; ++ if (stack_end && stack_start <= sp && sp < stack_end) { ++ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1); ++ put_cpu(); ++ BUG_ON(stack_left < 256 || size >= stack_left - 256); ++ return; ++ } ++ ++ put_cpu(); ++ ++ /* unknown stack */ ++ BUG(); ++} ++EXPORT_SYMBOL(pax_check_alloca); ++#endif +diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c +index d32abea..74daf4f 100644 +--- a/arch/x86/kernel/e820.c ++++ b/arch/x86/kernel/e820.c +@@ -800,8 +800,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void) + + static void early_panic(char *msg) + { +- early_printk(msg); +- panic(msg); ++ 
early_printk("%s", msg); ++ panic("%s", msg); + } + + static int userdef __initdata; +diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c +index 9b9f18b..9fcaa04 100644 +--- a/arch/x86/kernel/early_printk.c ++++ b/arch/x86/kernel/early_printk.c +@@ -7,6 +7,7 @@ + #include <linux/pci_regs.h> + #include <linux/pci_ids.h> + #include <linux/errno.h> ++#include <linux/sched.h> + #include <asm/io.h> + #include <asm/processor.h> + #include <asm/fcntl.h> +diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S +index 8f3e2de..caecc4e 100644 +--- a/arch/x86/kernel/entry_32.S ++++ b/arch/x86/kernel/entry_32.S +@@ -177,13 +177,153 @@ + /*CFI_REL_OFFSET gs, PT_GS*/ + .endm + .macro SET_KERNEL_GS reg ++ ++#ifdef CONFIG_CC_STACKPROTECTOR + movl $(__KERNEL_STACK_CANARY), \reg ++#elif defined(CONFIG_PAX_MEMORY_UDEREF) ++ movl $(__USER_DS), \reg ++#else ++ xorl \reg, \reg ++#endif ++ + movl \reg, %gs + .endm + + #endif /* CONFIG_X86_32_LAZY_GS */ + +-.macro SAVE_ALL ++.macro pax_enter_kernel ++#ifdef CONFIG_PAX_KERNEXEC ++ call pax_enter_kernel ++#endif ++.endm ++ ++.macro pax_exit_kernel ++#ifdef CONFIG_PAX_KERNEXEC ++ call pax_exit_kernel ++#endif ++.endm ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ENTRY(pax_enter_kernel) ++#ifdef CONFIG_PARAVIRT ++ pushl %eax ++ pushl %ecx ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0) ++ mov %eax, %esi ++#else ++ mov %cr0, %esi ++#endif ++ bts $16, %esi ++ jnc 1f ++ mov %cs, %esi ++ cmp $__KERNEL_CS, %esi ++ jz 3f ++ ljmp $__KERNEL_CS, $3f ++1: ljmp $__KERNEXEC_KERNEL_CS, $2f ++2: ++#ifdef CONFIG_PARAVIRT ++ mov %esi, %eax ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) ++#else ++ mov %esi, %cr0 ++#endif ++3: ++#ifdef CONFIG_PARAVIRT ++ popl %ecx ++ popl %eax ++#endif ++ ret ++ENDPROC(pax_enter_kernel) ++ ++ENTRY(pax_exit_kernel) ++#ifdef CONFIG_PARAVIRT ++ pushl %eax ++ pushl %ecx ++#endif ++ mov %cs, %esi ++ cmp $__KERNEXEC_KERNEL_CS, %esi ++ jnz 2f ++#ifdef CONFIG_PARAVIRT ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); ++ mov %eax, %esi ++#else ++ mov %cr0, %esi ++#endif ++ btr $16, %esi ++ ljmp $__KERNEL_CS, $1f ++1: ++#ifdef CONFIG_PARAVIRT ++ mov %esi, %eax ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0); ++#else ++ mov %esi, %cr0 ++#endif ++2: ++#ifdef CONFIG_PARAVIRT ++ popl %ecx ++ popl %eax ++#endif ++ ret ++ENDPROC(pax_exit_kernel) ++#endif ++ ++ .macro pax_erase_kstack ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++ call pax_erase_kstack ++#endif ++ .endm ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++/* ++ * ebp: thread_info ++ */ ++ENTRY(pax_erase_kstack) ++ pushl %edi ++ pushl %ecx ++ pushl %eax ++ ++ mov TI_lowest_stack(%ebp), %edi ++ mov $-0xBEEF, %eax ++ std ++ ++1: mov %edi, %ecx ++ and $THREAD_SIZE_asm - 1, %ecx ++ shr $2, %ecx ++ repne scasl ++ jecxz 2f ++ ++ cmp $2*16, %ecx ++ jc 2f ++ ++ mov $2*16, %ecx ++ repe scasl ++ jecxz 2f ++ jne 1b ++ ++2: cld ++ mov %esp, %ecx ++ sub %edi, %ecx ++ ++ cmp $THREAD_SIZE_asm, %ecx ++ jb 3f ++ ud2 ++3: ++ ++ shr $2, %ecx ++ rep stosl ++ ++ mov TI_task_thread_sp0(%ebp), %edi ++ sub $128, %edi ++ mov %edi, TI_lowest_stack(%ebp) ++ ++ popl %eax ++ popl %ecx ++ popl %edi ++ ret ++ENDPROC(pax_erase_kstack) ++#endif ++ ++.macro __SAVE_ALL _DS + cld + PUSH_GS + pushl_cfi %fs +@@ -206,7 +346,7 @@ + CFI_REL_OFFSET ecx, 0 + pushl_cfi %ebx + CFI_REL_OFFSET ebx, 0 +- movl $(__USER_DS), %edx ++ movl $\_DS, %edx + movl %edx, %ds + movl %edx, %es + movl $(__KERNEL_PERCPU), %edx +@@ -214,6 +354,15 @@ + SET_KERNEL_GS %edx + .endm + ++.macro SAVE_ALL ++#if defined(CONFIG_PAX_KERNEXEC) || 
defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ __SAVE_ALL __KERNEL_DS ++ pax_enter_kernel ++#else ++ __SAVE_ALL __USER_DS ++#endif ++.endm ++ + .macro RESTORE_INT_REGS + popl_cfi %ebx + CFI_RESTORE ebx +@@ -297,7 +446,7 @@ ENTRY(ret_from_fork) + popfl_cfi + jmp syscall_exit + CFI_ENDPROC +-END(ret_from_fork) ++ENDPROC(ret_from_fork) + + ENTRY(ret_from_kernel_thread) + CFI_STARTPROC +@@ -344,7 +493,15 @@ ret_from_intr: + andl $SEGMENT_RPL_MASK, %eax + #endif + cmpl $USER_RPL, %eax ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ jae resume_userspace ++ ++ pax_exit_kernel ++ jmp resume_kernel ++#else + jb resume_kernel # not returning to v8086 or userspace ++#endif + + ENTRY(resume_userspace) + LOCKDEP_SYS_EXIT +@@ -356,8 +513,8 @@ ENTRY(resume_userspace) + andl $_TIF_WORK_MASK, %ecx # is there any work to be done on + # int/exception return? + jne work_pending +- jmp restore_all +-END(ret_from_exception) ++ jmp restore_all_pax ++ENDPROC(ret_from_exception) + + #ifdef CONFIG_PREEMPT + ENTRY(resume_kernel) +@@ -372,7 +529,7 @@ need_resched: + jz restore_all + call preempt_schedule_irq + jmp need_resched +-END(resume_kernel) ++ENDPROC(resume_kernel) + #endif + CFI_ENDPROC + /* +@@ -406,30 +563,45 @@ sysenter_past_esp: + /*CFI_REL_OFFSET cs, 0*/ + /* + * Push current_thread_info()->sysenter_return to the stack. +- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words +- * pushed above; +8 corresponds to copy_thread's esp0 setting. + */ +- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp) ++ pushl_cfi $0 + CFI_REL_OFFSET eip, 0 + + pushl_cfi %eax + SAVE_ALL ++ GET_THREAD_INFO(%ebp) ++ movl TI_sysenter_return(%ebp),%ebp ++ movl %ebp,PT_EIP(%esp) + ENABLE_INTERRUPTS(CLBR_NONE) + + /* + * Load the potential sixth argument from user stack. + * Careful about security. 
+ */ ++ movl PT_OLDESP(%esp),%ebp ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov PT_OLDSS(%esp),%ds ++1: movl %ds:(%ebp),%ebp ++ push %ss ++ pop %ds ++#else + cmpl $__PAGE_OFFSET-3,%ebp + jae syscall_fault + ASM_STAC + 1: movl (%ebp),%ebp + ASM_CLAC ++#endif ++ + movl %ebp,PT_EBP(%esp) + _ASM_EXTABLE(1b,syscall_fault) + + GET_THREAD_INFO(%ebp) + ++#ifdef CONFIG_PAX_RANDKSTACK ++ pax_erase_kstack ++#endif ++ + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) + jnz sysenter_audit + sysenter_do_call: +@@ -444,12 +616,24 @@ sysenter_do_call: + testl $_TIF_ALLWORK_MASK, %ecx + jne sysexit_audit + sysenter_exit: ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pushl_cfi %eax ++ movl %esp, %eax ++ call pax_randomize_kstack ++ popl_cfi %eax ++#endif ++ ++ pax_erase_kstack ++ + /* if something modifies registers it must also disable sysexit */ + movl PT_EIP(%esp), %edx + movl PT_OLDESP(%esp), %ecx + xorl %ebp,%ebp + TRACE_IRQS_ON + 1: mov PT_FS(%esp), %fs ++2: mov PT_DS(%esp), %ds ++3: mov PT_ES(%esp), %es + PTGS_TO_GS + ENABLE_INTERRUPTS_SYSEXIT + +@@ -466,6 +650,9 @@ sysenter_audit: + movl %eax,%edx /* 2nd arg: syscall number */ + movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ + call __audit_syscall_entry ++ ++ pax_erase_kstack ++ + pushl_cfi %ebx + movl PT_EAX(%esp),%eax /* reload syscall number */ + jmp sysenter_do_call +@@ -491,10 +678,16 @@ sysexit_audit: + + CFI_ENDPROC + .pushsection .fixup,"ax" +-2: movl $0,PT_FS(%esp) ++4: movl $0,PT_FS(%esp) ++ jmp 1b ++5: movl $0,PT_DS(%esp) ++ jmp 1b ++6: movl $0,PT_ES(%esp) + jmp 1b + .popsection +- _ASM_EXTABLE(1b,2b) ++ _ASM_EXTABLE(1b,4b) ++ _ASM_EXTABLE(2b,5b) ++ _ASM_EXTABLE(3b,6b) + PTGS_TO_GS_EX + ENDPROC(ia32_sysenter_target) + +@@ -509,6 +702,11 @@ ENTRY(system_call) + pushl_cfi %eax # save orig_eax + SAVE_ALL + GET_THREAD_INFO(%ebp) ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pax_erase_kstack ++#endif ++ + # system call tracing in operation / emulation + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) + jnz syscall_trace_entry +@@ -527,6 +725,15 @@ syscall_exit: + testl $_TIF_ALLWORK_MASK, %ecx # current->work + jne syscall_exit_work + ++restore_all_pax: ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ movl %esp, %eax ++ call pax_randomize_kstack ++#endif ++ ++ pax_erase_kstack ++ + restore_all: + TRACE_IRQS_IRET + restore_all_notrace: +@@ -583,14 +790,34 @@ ldt_ss: + * compensating for the offset by changing to the ESPFIX segment with + * a base address that matches for the difference. 
+ */ +-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) ++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx) + mov %esp, %edx /* load kernel esp */ + mov PT_OLDESP(%esp), %eax /* load userspace esp */ + mov %dx, %ax /* eax: new kernel esp */ + sub %eax, %edx /* offset (low word is 0) */ ++#ifdef CONFIG_SMP ++ movl PER_CPU_VAR(cpu_number), %ebx ++ shll $PAGE_SHIFT_asm, %ebx ++ addl $cpu_gdt_table, %ebx ++#else ++ movl $cpu_gdt_table, %ebx ++#endif + shr $16, %edx +- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ +- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ mov %cr0, %esi ++ btr $16, %esi ++ mov %esi, %cr0 ++#endif ++ ++ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */ ++ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ bts $16, %esi ++ mov %esi, %cr0 ++#endif ++ + pushl_cfi $__ESPFIX_SS + pushl_cfi %eax /* new kernel esp */ + /* Disable interrupts, but do not irqtrace this section: we +@@ -619,20 +846,18 @@ work_resched: + movl TI_flags(%ebp), %ecx + andl $_TIF_WORK_MASK, %ecx # is there any work to be done other + # than syscall tracing? +- jz restore_all ++ jz restore_all_pax + testb $_TIF_NEED_RESCHED, %cl + jnz work_resched + + work_notifysig: # deal with pending signals and + # notify-resume requests ++ movl %esp, %eax + #ifdef CONFIG_VM86 + testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) +- movl %esp, %eax + jne work_notifysig_v86 # returning to kernel-space or + # vm86-space + 1: +-#else +- movl %esp, %eax + #endif + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) +@@ -653,7 +878,7 @@ work_notifysig_v86: + movl %eax, %esp + jmp 1b + #endif +-END(work_pending) ++ENDPROC(work_pending) + + # perform syscall exit tracing + ALIGN +@@ -661,11 +886,14 @@ syscall_trace_entry: + movl $-ENOSYS,PT_EAX(%esp) + movl %esp, %eax + call syscall_trace_enter ++ ++ pax_erase_kstack ++ + /* What it returned is what we'll actually use. */ + cmpl $(NR_syscalls), %eax + jnae syscall_call + jmp syscall_exit +-END(syscall_trace_entry) ++ENDPROC(syscall_trace_entry) + + # perform syscall exit tracing + ALIGN +@@ -678,21 +906,25 @@ syscall_exit_work: + movl %esp, %eax + call syscall_trace_leave + jmp resume_userspace +-END(syscall_exit_work) ++ENDPROC(syscall_exit_work) + CFI_ENDPROC + + RING0_INT_FRAME # can't unwind into user space anyway + syscall_fault: ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ push %ss ++ pop %ds ++#endif + ASM_CLAC + GET_THREAD_INFO(%ebp) + movl $-EFAULT,PT_EAX(%esp) + jmp resume_userspace +-END(syscall_fault) ++ENDPROC(syscall_fault) + + syscall_badsys: + movl $-ENOSYS,PT_EAX(%esp) + jmp resume_userspace +-END(syscall_badsys) ++ENDPROC(syscall_badsys) + CFI_ENDPROC + /* + * End of kprobes section +@@ -708,8 +940,15 @@ END(syscall_badsys) + * normal stack and adjusts ESP with the matching offset. 
+ */ + /* fixup the stack */ +- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ +- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ ++#ifdef CONFIG_SMP ++ movl PER_CPU_VAR(cpu_number), %ebx ++ shll $PAGE_SHIFT_asm, %ebx ++ addl $cpu_gdt_table, %ebx ++#else ++ movl $cpu_gdt_table, %ebx ++#endif ++ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */ ++ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */ + shl $16, %eax + addl %esp, %eax /* the adjusted stack pointer */ + pushl_cfi $__KERNEL_DS +@@ -762,7 +1001,7 @@ vector=vector+1 + .endr + 2: jmp common_interrupt + .endr +-END(irq_entries_start) ++ENDPROC(irq_entries_start) + + .previous + END(interrupt) +@@ -813,7 +1052,7 @@ ENTRY(coprocessor_error) + pushl_cfi $do_coprocessor_error + jmp error_code + CFI_ENDPROC +-END(coprocessor_error) ++ENDPROC(coprocessor_error) + + ENTRY(simd_coprocessor_error) + RING0_INT_FRAME +@@ -835,7 +1074,7 @@ ENTRY(simd_coprocessor_error) + #endif + jmp error_code + CFI_ENDPROC +-END(simd_coprocessor_error) ++ENDPROC(simd_coprocessor_error) + + ENTRY(device_not_available) + RING0_INT_FRAME +@@ -844,18 +1083,18 @@ ENTRY(device_not_available) + pushl_cfi $do_device_not_available + jmp error_code + CFI_ENDPROC +-END(device_not_available) ++ENDPROC(device_not_available) + + #ifdef CONFIG_PARAVIRT + ENTRY(native_iret) + iret + _ASM_EXTABLE(native_iret, iret_exc) +-END(native_iret) ++ENDPROC(native_iret) + + ENTRY(native_irq_enable_sysexit) + sti + sysexit +-END(native_irq_enable_sysexit) ++ENDPROC(native_irq_enable_sysexit) + #endif + + ENTRY(overflow) +@@ -865,7 +1104,7 @@ ENTRY(overflow) + pushl_cfi $do_overflow + jmp error_code + CFI_ENDPROC +-END(overflow) ++ENDPROC(overflow) + + ENTRY(bounds) + RING0_INT_FRAME +@@ -874,7 +1113,7 @@ ENTRY(bounds) + pushl_cfi $do_bounds + jmp error_code + CFI_ENDPROC +-END(bounds) ++ENDPROC(bounds) + + ENTRY(invalid_op) + RING0_INT_FRAME +@@ -883,7 +1122,7 @@ ENTRY(invalid_op) + pushl_cfi $do_invalid_op + jmp error_code + CFI_ENDPROC +-END(invalid_op) ++ENDPROC(invalid_op) + + ENTRY(coprocessor_segment_overrun) + RING0_INT_FRAME +@@ -892,7 +1131,7 @@ ENTRY(coprocessor_segment_overrun) + pushl_cfi $do_coprocessor_segment_overrun + jmp error_code + CFI_ENDPROC +-END(coprocessor_segment_overrun) ++ENDPROC(coprocessor_segment_overrun) + + ENTRY(invalid_TSS) + RING0_EC_FRAME +@@ -900,7 +1139,7 @@ ENTRY(invalid_TSS) + pushl_cfi $do_invalid_TSS + jmp error_code + CFI_ENDPROC +-END(invalid_TSS) ++ENDPROC(invalid_TSS) + + ENTRY(segment_not_present) + RING0_EC_FRAME +@@ -908,7 +1147,7 @@ ENTRY(segment_not_present) + pushl_cfi $do_segment_not_present + jmp error_code + CFI_ENDPROC +-END(segment_not_present) ++ENDPROC(segment_not_present) + + ENTRY(stack_segment) + RING0_EC_FRAME +@@ -916,7 +1155,7 @@ ENTRY(stack_segment) + pushl_cfi $do_stack_segment + jmp error_code + CFI_ENDPROC +-END(stack_segment) ++ENDPROC(stack_segment) + + ENTRY(alignment_check) + RING0_EC_FRAME +@@ -924,7 +1163,7 @@ ENTRY(alignment_check) + pushl_cfi $do_alignment_check + jmp error_code + CFI_ENDPROC +-END(alignment_check) ++ENDPROC(alignment_check) + + ENTRY(divide_error) + RING0_INT_FRAME +@@ -933,7 +1172,7 @@ ENTRY(divide_error) + pushl_cfi $do_divide_error + jmp error_code + CFI_ENDPROC +-END(divide_error) ++ENDPROC(divide_error) + + #ifdef CONFIG_X86_MCE + ENTRY(machine_check) +@@ -943,7 +1182,7 @@ ENTRY(machine_check) + pushl_cfi machine_check_vector + jmp error_code + CFI_ENDPROC +-END(machine_check) ++ENDPROC(machine_check) + #endif + + ENTRY(spurious_interrupt_bug) +@@ -953,7 +1192,7 @@ ENTRY(spurious_interrupt_bug) + 
pushl_cfi $do_spurious_interrupt_bug + jmp error_code + CFI_ENDPROC +-END(spurious_interrupt_bug) ++ENDPROC(spurious_interrupt_bug) + /* + * End of kprobes section + */ +@@ -1063,7 +1302,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, + + ENTRY(mcount) + ret +-END(mcount) ++ENDPROC(mcount) + + ENTRY(ftrace_caller) + cmpl $0, function_trace_stop +@@ -1096,7 +1335,7 @@ ftrace_graph_call: + .globl ftrace_stub + ftrace_stub: + ret +-END(ftrace_caller) ++ENDPROC(ftrace_caller) + + ENTRY(ftrace_regs_caller) + pushf /* push flags before compare (in cs location) */ +@@ -1197,7 +1436,7 @@ trace: + popl %ecx + popl %eax + jmp ftrace_stub +-END(mcount) ++ENDPROC(mcount) + #endif /* CONFIG_DYNAMIC_FTRACE */ + #endif /* CONFIG_FUNCTION_TRACER */ + +@@ -1215,7 +1454,7 @@ ENTRY(ftrace_graph_caller) + popl %ecx + popl %eax + ret +-END(ftrace_graph_caller) ++ENDPROC(ftrace_graph_caller) + + .globl return_to_handler + return_to_handler: +@@ -1271,15 +1510,18 @@ error_code: + movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart + REG_TO_PTGS %ecx + SET_KERNEL_GS %ecx +- movl $(__USER_DS), %ecx ++ movl $(__KERNEL_DS), %ecx + movl %ecx, %ds + movl %ecx, %es ++ ++ pax_enter_kernel ++ + TRACE_IRQS_OFF + movl %esp,%eax # pt_regs pointer + call *%edi + jmp ret_from_exception + CFI_ENDPROC +-END(page_fault) ++ENDPROC(page_fault) + + /* + * Debug traps and NMI can happen at the one SYSENTER instruction +@@ -1322,7 +1564,7 @@ debug_stack_correct: + call do_debug + jmp ret_from_exception + CFI_ENDPROC +-END(debug) ++ENDPROC(debug) + + /* + * NMI is doubly nasty. It can happen _while_ we're handling +@@ -1360,6 +1602,9 @@ nmi_stack_correct: + xorl %edx,%edx # zero error code + movl %esp,%eax # pt_regs pointer + call do_nmi ++ ++ pax_exit_kernel ++ + jmp restore_all_notrace + CFI_ENDPROC + +@@ -1396,12 +1641,15 @@ nmi_espfix_stack: + FIXUP_ESPFIX_STACK # %eax == %esp + xorl %edx,%edx # zero error code + call do_nmi ++ ++ pax_exit_kernel ++ + RESTORE_REGS + lss 12+4(%esp), %esp # back to espfix stack + CFI_ADJUST_CFA_OFFSET -24 + jmp irq_return + CFI_ENDPROC +-END(nmi) ++ENDPROC(nmi) + + ENTRY(int3) + RING0_INT_FRAME +@@ -1414,14 +1662,14 @@ ENTRY(int3) + call do_int3 + jmp ret_from_exception + CFI_ENDPROC +-END(int3) ++ENDPROC(int3) + + ENTRY(general_protection) + RING0_EC_FRAME + pushl_cfi $do_general_protection + jmp error_code + CFI_ENDPROC +-END(general_protection) ++ENDPROC(general_protection) + + #ifdef CONFIG_KVM_GUEST + ENTRY(async_page_fault) +@@ -1430,7 +1678,7 @@ ENTRY(async_page_fault) + pushl_cfi $do_async_page_fault + jmp error_code + CFI_ENDPROC +-END(async_page_fault) ++ENDPROC(async_page_fault) + #endif + + /* +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S +index c1d01e6..a88cf02 100644 +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -59,6 +59,8 @@ + #include <asm/context_tracking.h> + #include <asm/smap.h> + #include <linux/err.h> ++#include <asm/pgtable.h> ++#include <asm/alternative-asm.h> + + /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. 
*/ + #include <linux/elf-em.h> +@@ -80,8 +82,9 @@ + #ifdef CONFIG_DYNAMIC_FTRACE + + ENTRY(function_hook) ++ pax_force_retaddr + retq +-END(function_hook) ++ENDPROC(function_hook) + + /* skip is set if stack has been adjusted */ + .macro ftrace_caller_setup skip=0 +@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call) + #endif + + GLOBAL(ftrace_stub) ++ pax_force_retaddr + retq +-END(ftrace_caller) ++ENDPROC(ftrace_caller) + + ENTRY(ftrace_regs_caller) + /* Save the current flags before compare (in SS location)*/ +@@ -191,7 +195,7 @@ ftrace_restore_flags: + popfq + jmp ftrace_stub + +-END(ftrace_regs_caller) ++ENDPROC(ftrace_regs_caller) + + + #else /* ! CONFIG_DYNAMIC_FTRACE */ +@@ -212,6 +216,7 @@ ENTRY(function_hook) + #endif + + GLOBAL(ftrace_stub) ++ pax_force_retaddr + retq + + trace: +@@ -225,12 +230,13 @@ trace: + #endif + subq $MCOUNT_INSN_SIZE, %rdi + ++ pax_force_fptr ftrace_trace_function + call *ftrace_trace_function + + MCOUNT_RESTORE_FRAME + + jmp ftrace_stub +-END(function_hook) ++ENDPROC(function_hook) + #endif /* CONFIG_DYNAMIC_FTRACE */ + #endif /* CONFIG_FUNCTION_TRACER */ + +@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller) + + MCOUNT_RESTORE_FRAME + ++ pax_force_retaddr + retq +-END(ftrace_graph_caller) ++ENDPROC(ftrace_graph_caller) + + GLOBAL(return_to_handler) + subq $24, %rsp +@@ -269,7 +276,9 @@ GLOBAL(return_to_handler) + movq 8(%rsp), %rdx + movq (%rsp), %rax + addq $24, %rsp ++ pax_force_fptr %rdi + jmp *%rdi ++ENDPROC(return_to_handler) + #endif + + +@@ -284,6 +293,309 @@ ENTRY(native_usergs_sysret64) + ENDPROC(native_usergs_sysret64) + #endif /* CONFIG_PARAVIRT */ + ++ .macro ljmpq sel, off ++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM) ++ .byte 0x48; ljmp *1234f(%rip) ++ .pushsection .rodata ++ .align 16 ++ 1234: .quad \off; .word \sel ++ .popsection ++#else ++ pushq $\sel ++ pushq $\off ++ lretq ++#endif ++ .endm ++ ++ .macro pax_enter_kernel ++ pax_set_fptr_mask ++#ifdef CONFIG_PAX_KERNEXEC ++ call pax_enter_kernel ++#endif ++ .endm ++ ++ .macro pax_exit_kernel ++#ifdef CONFIG_PAX_KERNEXEC ++ call pax_exit_kernel ++#endif ++ .endm ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ENTRY(pax_enter_kernel) ++ pushq %rdi ++ ++#ifdef CONFIG_PARAVIRT ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++ GET_CR0_INTO_RDI ++ bts $16,%rdi ++ jnc 3f ++ mov %cs,%edi ++ cmp $__KERNEL_CS,%edi ++ jnz 2f ++1: ++ ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI) ++#endif ++ ++ popq %rdi ++ pax_force_retaddr ++ retq ++ ++2: ljmpq __KERNEL_CS,1b ++3: ljmpq __KERNEXEC_KERNEL_CS,4f ++4: SET_RDI_INTO_CR0 ++ jmp 1b ++ENDPROC(pax_enter_kernel) ++ ++ENTRY(pax_exit_kernel) ++ pushq %rdi ++ ++#ifdef CONFIG_PARAVIRT ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++ mov %cs,%rdi ++ cmp $__KERNEXEC_KERNEL_CS,%edi ++ jz 2f ++ GET_CR0_INTO_RDI ++ bts $16,%rdi ++ jnc 4f ++1: ++ ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI); ++#endif ++ ++ popq %rdi ++ pax_force_retaddr ++ retq |